1 1.347 yamaguch /* $NetBSD: ixgbe.c,v 1.347 2023/11/02 09:40:47 yamaguchi Exp $ */
2 1.99 msaitoh
3 1.1 dyoung /******************************************************************************
4 1.1 dyoung
5 1.99 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 dyoung All rights reserved.
7 1.99 msaitoh
8 1.99 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 dyoung modification, are permitted provided that the following conditions are met:
10 1.99 msaitoh
11 1.99 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 dyoung this list of conditions and the following disclaimer.
13 1.99 msaitoh
14 1.99 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.99 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 dyoung documentation and/or other materials provided with the distribution.
17 1.99 msaitoh
18 1.99 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.99 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 dyoung this software without specific prior written permission.
21 1.99 msaitoh
22 1.1 dyoung THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.99 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.99 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.99 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.99 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.99 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.99 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.99 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.99 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 dyoung ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 dyoung POSSIBILITY OF SUCH DAMAGE.
33 1.1 dyoung
34 1.1 dyoung ******************************************************************************/
35 1.145 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 1.99 msaitoh
37 1.1 dyoung /*
38 1.1 dyoung * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 dyoung * All rights reserved.
40 1.1 dyoung *
41 1.1 dyoung * This code is derived from software contributed to The NetBSD Foundation
42 1.1 dyoung * by Coyote Point Systems, Inc.
43 1.1 dyoung *
44 1.1 dyoung * Redistribution and use in source and binary forms, with or without
45 1.1 dyoung * modification, are permitted provided that the following conditions
46 1.1 dyoung * are met:
47 1.1 dyoung * 1. Redistributions of source code must retain the above copyright
48 1.1 dyoung * notice, this list of conditions and the following disclaimer.
49 1.1 dyoung * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 dyoung * notice, this list of conditions and the following disclaimer in the
51 1.1 dyoung * documentation and/or other materials provided with the distribution.
52 1.1 dyoung *
53 1.1 dyoung * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 dyoung * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 dyoung * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 dyoung * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 dyoung * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 dyoung * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 dyoung * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 dyoung * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 dyoung * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 dyoung * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 dyoung * POSSIBILITY OF SUCH DAMAGE.
64 1.1 dyoung */
65 1.1 dyoung
66 1.281 msaitoh #include <sys/cdefs.h>
67 1.347 yamaguch __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.347 2023/11/02 09:40:47 yamaguchi Exp $");
68 1.281 msaitoh
69 1.80 msaitoh #ifdef _KERNEL_OPT
70 1.1 dyoung #include "opt_inet.h"
71 1.22 msaitoh #include "opt_inet6.h"
72 1.80 msaitoh #include "opt_net_mpsafe.h"
73 1.80 msaitoh #endif
74 1.1 dyoung
75 1.1 dyoung #include "ixgbe.h"
76 1.251 msaitoh #include "ixgbe_phy.h"
77 1.135 msaitoh #include "ixgbe_sriov.h"
78 1.1 dyoung
79 1.33 msaitoh #include <sys/cprng.h>
80 1.95 msaitoh #include <dev/mii/mii.h>
81 1.95 msaitoh #include <dev/mii/miivar.h>
82 1.33 msaitoh
83 1.99 msaitoh /************************************************************************
84 1.99 msaitoh * Driver version
85 1.99 msaitoh ************************************************************************/
86 1.159 maxv static const char ixgbe_driver_version[] = "4.0.1-k";
87 1.301 msaitoh /* XXX NetBSD: + 3.3.24 */
88 1.1 dyoung
89 1.99 msaitoh /************************************************************************
90 1.99 msaitoh * PCI Device ID Table
91 1.1 dyoung *
92 1.99 msaitoh * Used by probe to select devices to load on
93 1.99 msaitoh * Last field stores an index into ixgbe_strings
94 1.99 msaitoh * Last entry must be all 0s
95 1.1 dyoung *
96 1.99 msaitoh * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 1.99 msaitoh ************************************************************************/
98 1.159 maxv static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
99 1.1 dyoung {
100 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
101 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
102 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
103 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
104 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
105 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
106 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
107 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
108 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
109 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
110 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
111 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
112 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
113 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
114 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
115 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
116 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
117 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
118 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
119 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
120 1.334 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
121 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
122 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
123 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
124 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
125 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
126 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
127 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
128 1.24 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
129 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
130 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
131 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
132 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
133 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
134 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
135 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
136 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
137 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
138 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
139 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
140 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
141 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
142 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
143 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
144 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
145 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
146 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
147 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
148 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
149 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
150 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
151 1.1 dyoung /* required last entry */
152 1.1 dyoung {0, 0, 0, 0, 0}
153 1.1 dyoung };
154 1.1 dyoung
155 1.99 msaitoh /************************************************************************
156 1.99 msaitoh * Table of branding strings
157 1.99 msaitoh ************************************************************************/
158 1.1 dyoung static const char *ixgbe_strings[] = {
159 1.1 dyoung "Intel(R) PRO/10GbE PCI-Express Network Driver"
160 1.1 dyoung };
161 1.1 dyoung
162 1.99 msaitoh /************************************************************************
163 1.99 msaitoh * Function prototypes
164 1.99 msaitoh ************************************************************************/
165 1.186 msaitoh static int ixgbe_probe(device_t, cfdata_t, void *);
166 1.333 msaitoh static void ixgbe_quirks(struct ixgbe_softc *);
167 1.186 msaitoh static void ixgbe_attach(device_t, device_t, void *);
168 1.186 msaitoh static int ixgbe_detach(device_t, int);
169 1.1 dyoung #if 0
170 1.186 msaitoh static int ixgbe_shutdown(device_t);
171 1.1 dyoung #endif
172 1.44 msaitoh static bool ixgbe_suspend(device_t, const pmf_qual_t *);
173 1.44 msaitoh static bool ixgbe_resume(device_t, const pmf_qual_t *);
174 1.98 msaitoh static int ixgbe_ifflags_cb(struct ethercom *);
175 1.186 msaitoh static int ixgbe_ioctl(struct ifnet *, u_long, void *);
176 1.1 dyoung static int ixgbe_init(struct ifnet *);
177 1.333 msaitoh static void ixgbe_init_locked(struct ixgbe_softc *);
178 1.232 msaitoh static void ixgbe_ifstop(struct ifnet *, int);
179 1.252 msaitoh static void ixgbe_stop_locked(void *);
180 1.333 msaitoh static void ixgbe_init_device_features(struct ixgbe_softc *);
181 1.333 msaitoh static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
182 1.333 msaitoh static void ixgbe_add_media_types(struct ixgbe_softc *);
183 1.186 msaitoh static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
184 1.186 msaitoh static int ixgbe_media_change(struct ifnet *);
185 1.333 msaitoh static int ixgbe_allocate_pci_resources(struct ixgbe_softc *,
186 1.1 dyoung const struct pci_attach_args *);
187 1.333 msaitoh static void ixgbe_free_deferred_handlers(struct ixgbe_softc *);
188 1.333 msaitoh static void ixgbe_get_slot_info(struct ixgbe_softc *);
189 1.333 msaitoh static int ixgbe_allocate_msix(struct ixgbe_softc *,
190 1.1 dyoung const struct pci_attach_args *);
191 1.333 msaitoh static int ixgbe_allocate_legacy(struct ixgbe_softc *,
192 1.1 dyoung const struct pci_attach_args *);
193 1.333 msaitoh static int ixgbe_configure_interrupts(struct ixgbe_softc *);
194 1.333 msaitoh static void ixgbe_free_pciintr_resources(struct ixgbe_softc *);
195 1.333 msaitoh static void ixgbe_free_pci_resources(struct ixgbe_softc *);
196 1.1 dyoung static void ixgbe_local_timer(void *);
197 1.233 msaitoh static void ixgbe_handle_timer(struct work *, void *);
198 1.186 msaitoh static void ixgbe_recovery_mode_timer(void *);
199 1.233 msaitoh static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
200 1.333 msaitoh static int ixgbe_setup_interface(device_t, struct ixgbe_softc *);
201 1.333 msaitoh static void ixgbe_config_gpie(struct ixgbe_softc *);
202 1.333 msaitoh static void ixgbe_config_dmac(struct ixgbe_softc *);
203 1.333 msaitoh static void ixgbe_config_delay_values(struct ixgbe_softc *);
204 1.333 msaitoh static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
205 1.333 msaitoh static void ixgbe_config_link(struct ixgbe_softc *);
206 1.333 msaitoh static void ixgbe_check_wol_support(struct ixgbe_softc *);
207 1.333 msaitoh static int ixgbe_setup_low_power_mode(struct ixgbe_softc *);
208 1.161 kamil #if 0
209 1.333 msaitoh static void ixgbe_rearm_queues(struct ixgbe_softc *, u64);
210 1.161 kamil #endif
211 1.1 dyoung
212 1.333 msaitoh static void ixgbe_initialize_transmit_units(struct ixgbe_softc *);
213 1.333 msaitoh static void ixgbe_initialize_receive_units(struct ixgbe_softc *);
214 1.333 msaitoh static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
215 1.333 msaitoh static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
216 1.333 msaitoh static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
217 1.333 msaitoh
218 1.333 msaitoh static void ixgbe_enable_intr(struct ixgbe_softc *);
219 1.333 msaitoh static void ixgbe_disable_intr(struct ixgbe_softc *);
220 1.333 msaitoh static void ixgbe_update_stats_counters(struct ixgbe_softc *);
221 1.333 msaitoh static void ixgbe_set_rxfilter(struct ixgbe_softc *);
222 1.333 msaitoh static void ixgbe_update_link_status(struct ixgbe_softc *);
223 1.333 msaitoh static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
224 1.333 msaitoh static void ixgbe_configure_ivars(struct ixgbe_softc *);
225 1.1 dyoung static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
226 1.333 msaitoh static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
227 1.1 dyoung
228 1.333 msaitoh static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
229 1.333 msaitoh static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
230 1.193 msaitoh static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
231 1.333 msaitoh static int ixgbe_register_vlan(struct ixgbe_softc *, u16);
232 1.333 msaitoh static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16);
233 1.1 dyoung
234 1.333 msaitoh static void ixgbe_add_device_sysctls(struct ixgbe_softc *);
235 1.333 msaitoh static void ixgbe_add_hw_stats(struct ixgbe_softc *);
236 1.333 msaitoh static void ixgbe_clear_evcnt(struct ixgbe_softc *);
237 1.333 msaitoh static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238 1.333 msaitoh static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239 1.333 msaitoh static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 1.44 msaitoh
241 1.44 msaitoh /* Sysctl handlers */
242 1.52 msaitoh static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243 1.52 msaitoh static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244 1.186 msaitoh static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245 1.44 msaitoh static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246 1.44 msaitoh static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247 1.44 msaitoh static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248 1.48 msaitoh #ifdef IXGBE_DEBUG
249 1.48 msaitoh static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250 1.48 msaitoh static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251 1.48 msaitoh #endif
252 1.186 msaitoh static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253 1.287 msaitoh static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
254 1.186 msaitoh static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
255 1.186 msaitoh static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
256 1.186 msaitoh static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
257 1.186 msaitoh static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
258 1.186 msaitoh static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
259 1.158 msaitoh static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
260 1.286 msaitoh static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
261 1.313 msaitoh static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
262 1.313 msaitoh static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
263 1.44 msaitoh static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
264 1.44 msaitoh static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
265 1.1 dyoung
266 1.277 msaitoh /* Interrupt functions */
267 1.34 msaitoh static int ixgbe_msix_que(void *);
268 1.233 msaitoh static int ixgbe_msix_admin(void *);
269 1.333 msaitoh static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
270 1.277 msaitoh static int ixgbe_legacy_irq(void *);
271 1.1 dyoung
272 1.233 msaitoh /* Event handlers running on workqueue */
273 1.1 dyoung static void ixgbe_handle_que(void *);
274 1.1 dyoung static void ixgbe_handle_link(void *);
275 1.233 msaitoh static void ixgbe_handle_msf(void *);
276 1.273 msaitoh static void ixgbe_handle_mod(void *, bool);
277 1.44 msaitoh static void ixgbe_handle_phy(void *);
278 1.1 dyoung
279 1.233 msaitoh /* Deferred workqueue handlers */
280 1.233 msaitoh static void ixgbe_handle_admin(struct work *, void *);
281 1.128 knakahar static void ixgbe_handle_que_work(struct work *, void *);
282 1.128 knakahar
283 1.159 maxv static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
284 1.1 dyoung
285 1.99 msaitoh /************************************************************************
286 1.99 msaitoh * NetBSD Device Interface Entry Points
287 1.99 msaitoh ************************************************************************/
288 1.333 msaitoh CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
289 1.1 dyoung ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
290 1.1 dyoung DVF_DETACH_SHUTDOWN);
291 1.1 dyoung
292 1.1 dyoung #if 0
293 1.44 msaitoh devclass_t ix_devclass;
294 1.44 msaitoh DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
295 1.1 dyoung
296 1.44 msaitoh MODULE_DEPEND(ix, pci, 1, 1, 1);
297 1.44 msaitoh MODULE_DEPEND(ix, ether, 1, 1, 1);
298 1.115 msaitoh #ifdef DEV_NETMAP
299 1.115 msaitoh MODULE_DEPEND(ix, netmap, 1, 1, 1);
300 1.115 msaitoh #endif
301 1.1 dyoung #endif
302 1.1 dyoung
303 1.1 dyoung /*
304 1.99 msaitoh * TUNEABLE PARAMETERS:
305 1.99 msaitoh */
306 1.1 dyoung
307 1.1 dyoung /*
 308 1.99 msaitoh  * AIM: Adaptive Interrupt Moderation.
 309 1.99 msaitoh  * The interrupt rate is varied over
 310 1.99 msaitoh  * time based on the traffic seen on
 311 1.99 msaitoh  * that interrupt vector.
312 1.99 msaitoh */
313 1.73 msaitoh static bool ixgbe_enable_aim = true;
314 1.52 msaitoh #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
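/*
 * Note: on NetBSD, SYSCTL_INT() is defined as an empty stub here, so the
 * FreeBSD-style tunable declarations below are kept for reference only.
 */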
315 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
316 1.52 msaitoh "Enable adaptive interrupt moderation");
317 1.1 dyoung
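/*
 * Default cap on the per-vector interrupt rate
 * (4000000 / IXGBE_LOW_LATENCY interrupts per second); run-time
 * adjustment goes through ixgbe_sysctl_interrupt_rate_handler().
 */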
318 1.22 msaitoh static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
319 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
320 1.52 msaitoh &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
321 1.1 dyoung
322 1.1 dyoung /* How many packets rxeof tries to clean at a time */
323 1.1 dyoung static int ixgbe_rx_process_limit = 256;
324 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
325 1.99 msaitoh &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
326 1.1 dyoung
327 1.28 msaitoh /* How many packets txeof tries to clean at a time */
328 1.28 msaitoh static int ixgbe_tx_process_limit = 256;
329 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
330 1.52 msaitoh &ixgbe_tx_process_limit, 0,
331 1.99 msaitoh "Maximum number of sent packets to process at a time, -1 means unlimited");
332 1.52 msaitoh
333 1.52 msaitoh /* Flow control setting, default to full */
334 1.52 msaitoh static int ixgbe_flow_control = ixgbe_fc_full;
335 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
336 1.52 msaitoh &ixgbe_flow_control, 0, "Default flow control used for all adapters");
337 1.52 msaitoh
 338 1.179 msaitoh /* Whether Tx/Rx packet processing is deferred to a workqueue or a softint */
339 1.128 knakahar static bool ixgbe_txrx_workqueue = false;
340 1.128 knakahar
341 1.1 dyoung /*
 342 1.99 msaitoh  * Smart speed setting, default to on.
 343 1.99 msaitoh  * This only works as a compile option
 344 1.99 msaitoh  * right now as it is set during attach;
 345 1.99 msaitoh  * set it to 'ixgbe_smart_speed_off' to
 346 1.99 msaitoh  * disable.
347 1.99 msaitoh */
348 1.1 dyoung static int ixgbe_smart_speed = ixgbe_smart_speed_on;
349 1.1 dyoung
350 1.1 dyoung /*
351 1.99 msaitoh * MSI-X should be the default for best performance,
352 1.1 dyoung * but this allows it to be forced off for testing.
353 1.1 dyoung */
354 1.1 dyoung static int ixgbe_enable_msix = 1;
355 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
356 1.52 msaitoh "Enable MSI-X interrupts");
357 1.1 dyoung
358 1.1 dyoung /*
 359 1.1 dyoung  * Number of queues. If set to 0,
 360 1.1 dyoung  * it autoconfigures based on the
 361 1.1 dyoung  * number of CPUs, with a max of 8. This
 362 1.220 pgoyette  * can be overridden manually here.
363 1.1 dyoung */
364 1.62 msaitoh static int ixgbe_num_queues = 0;
365 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
366 1.52 msaitoh "Number of queues to configure, 0 indicates autoconfigure");
367 1.1 dyoung
368 1.1 dyoung /*
369 1.99 msaitoh * Number of TX descriptors per ring,
370 1.99 msaitoh * setting higher than RX as this seems
371 1.99 msaitoh * the better performing choice.
372 1.99 msaitoh */
373 1.335 msaitoh static int ixgbe_txd = DEFAULT_TXD;
374 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
375 1.52 msaitoh "Number of transmit descriptors per queue");
376 1.1 dyoung
377 1.1 dyoung /* Number of RX descriptors per ring */
378 1.335 msaitoh static int ixgbe_rxd = DEFAULT_RXD;
379 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
380 1.52 msaitoh "Number of receive descriptors per queue");
381 1.33 msaitoh
382 1.33 msaitoh /*
 383 1.99 msaitoh  * Setting this on will allow the use
 384 1.99 msaitoh  * of unsupported SFP+ modules; note that
 385 1.99 msaitoh  * doing so means you are on your own :)
386 1.99 msaitoh */
387 1.35 msaitoh static int allow_unsupported_sfp = false;
388 1.52 msaitoh #define TUNABLE_INT(__x, __y)
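/*
 * TUNABLE_INT() is likewise an empty stub, so allow_unsupported_sfp can
 * only be changed at compile time here.
 */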
389 1.52 msaitoh TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
390 1.1 dyoung
391 1.99 msaitoh /*
392 1.99 msaitoh * Not sure if Flow Director is fully baked,
393 1.99 msaitoh * so we'll default to turning it off.
394 1.99 msaitoh */
395 1.99 msaitoh static int ixgbe_enable_fdir = 0;
396 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
397 1.99 msaitoh "Enable Flow Director");
398 1.99 msaitoh
399 1.99 msaitoh /* Legacy Transmit (single queue) */
400 1.99 msaitoh static int ixgbe_enable_legacy_tx = 0;
401 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
402 1.99 msaitoh &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
403 1.99 msaitoh
404 1.99 msaitoh /* Receive-Side Scaling */
405 1.99 msaitoh static int ixgbe_enable_rss = 1;
406 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
407 1.99 msaitoh "Enable Receive-Side Scaling (RSS)");
408 1.99 msaitoh
409 1.99 msaitoh #if 0
410 1.99 msaitoh static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
411 1.99 msaitoh static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
412 1.1 dyoung #endif
413 1.1 dyoung
414 1.80 msaitoh #ifdef NET_MPSAFE
415 1.80 msaitoh #define IXGBE_MPSAFE 1
416 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
417 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
418 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
419 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
420 1.80 msaitoh #else
421 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS 0
422 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS 0
423 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
424 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS 0
425 1.80 msaitoh #endif
426 1.128 knakahar #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
427 1.80 msaitoh
428 1.312 msaitoh /* Interval between reports of errors */
429 1.312 msaitoh static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */
430 1.312 msaitoh
431 1.99 msaitoh /************************************************************************
432 1.99 msaitoh * ixgbe_initialize_rss_mapping
433 1.99 msaitoh ************************************************************************/
434 1.98 msaitoh static void
435 1.333 msaitoh ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
436 1.1 dyoung {
437 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
438 1.186 msaitoh u32 reta = 0, mrqc, rss_key[10];
439 1.186 msaitoh int queue_id, table_size, index_mult;
440 1.186 msaitoh int i, j;
441 1.186 msaitoh u32 rss_hash_config;
442 1.99 msaitoh
 443 1.122 knakahar /* Force use of the default RSS key. */
444 1.122 knakahar #ifdef __NetBSD__
445 1.122 knakahar rss_getkey((uint8_t *) &rss_key);
446 1.122 knakahar #else
447 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
448 1.99 msaitoh /* Fetch the configured RSS key */
449 1.99 msaitoh rss_getkey((uint8_t *) &rss_key);
450 1.99 msaitoh } else {
451 1.99 msaitoh /* set up random bits */
452 1.99 msaitoh cprng_fast(&rss_key, sizeof(rss_key));
453 1.99 msaitoh }
454 1.122 knakahar #endif
455 1.1 dyoung
456 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
457 1.98 msaitoh index_mult = 0x1;
458 1.98 msaitoh table_size = 128;
459 1.333 msaitoh switch (sc->hw.mac.type) {
460 1.98 msaitoh case ixgbe_mac_82598EB:
461 1.98 msaitoh index_mult = 0x11;
462 1.98 msaitoh break;
463 1.98 msaitoh case ixgbe_mac_X550:
464 1.98 msaitoh case ixgbe_mac_X550EM_x:
465 1.99 msaitoh case ixgbe_mac_X550EM_a:
466 1.98 msaitoh table_size = 512;
467 1.98 msaitoh break;
468 1.98 msaitoh default:
469 1.98 msaitoh break;
470 1.98 msaitoh }
471 1.1 dyoung
472 1.98 msaitoh /* Set up the redirection table */
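	/*
	 * Each 32-bit RETA/ERETA register packs four 8-bit queue indices,
	 * so an index is shifted into 'reta' on every pass and the register
	 * is written out on every fourth pass.  Entries 0-127 go to RETA;
	 * on MACs with a 512-entry table the remainder go to ERETA.
	 */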
473 1.99 msaitoh for (i = 0, j = 0; i < table_size; i++, j++) {
474 1.333 msaitoh if (j == sc->num_queues)
475 1.99 msaitoh j = 0;
476 1.99 msaitoh
477 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
478 1.99 msaitoh /*
479 1.99 msaitoh * Fetch the RSS bucket id for the given indirection
480 1.99 msaitoh * entry. Cap it at the number of configured buckets
481 1.99 msaitoh * (which is num_queues.)
482 1.99 msaitoh */
483 1.99 msaitoh queue_id = rss_get_indirection_to_bucket(i);
484 1.333 msaitoh queue_id = queue_id % sc->num_queues;
485 1.99 msaitoh } else
486 1.99 msaitoh queue_id = (j * index_mult);
487 1.99 msaitoh
488 1.98 msaitoh /*
489 1.98 msaitoh * The low 8 bits are for hash value (n+0);
490 1.98 msaitoh * The next 8 bits are for hash value (n+1), etc.
491 1.98 msaitoh */
492 1.98 msaitoh reta = reta >> 8;
493 1.98 msaitoh reta = reta | (((uint32_t) queue_id) << 24);
494 1.98 msaitoh if ((i & 3) == 3) {
495 1.98 msaitoh if (i < 128)
496 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
497 1.98 msaitoh else
498 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
499 1.99 msaitoh reta);
500 1.98 msaitoh reta = 0;
501 1.98 msaitoh }
502 1.98 msaitoh }
503 1.1 dyoung
504 1.98 msaitoh /* Now fill our hash function seeds */
505 1.99 msaitoh for (i = 0; i < 10; i++)
506 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
507 1.1 dyoung
508 1.98 msaitoh /* Perform hash on these packet types */
509 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
510 1.99 msaitoh rss_hash_config = rss_gethashconfig();
511 1.99 msaitoh else {
512 1.99 msaitoh /*
513 1.99 msaitoh * Disable UDP - IP fragments aren't currently being handled
514 1.99 msaitoh * and so we end up with a mix of 2-tuple and 4-tuple
515 1.99 msaitoh * traffic.
516 1.99 msaitoh */
517 1.99 msaitoh rss_hash_config = RSS_HASHTYPE_RSS_IPV4
518 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV4
519 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6
520 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6
521 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6_EX
522 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
523 1.99 msaitoh }
524 1.99 msaitoh
525 1.98 msaitoh mrqc = IXGBE_MRQC_RSSEN;
526 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
527 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
528 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
529 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
530 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
531 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
532 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
533 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
534 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
535 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
536 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
537 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
538 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
539 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
540 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
541 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
542 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
543 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
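	/* Merge in the queueing mode bits required by the current SR-IOV (iov_mode) configuration. */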
544 1.333 msaitoh mrqc |= ixgbe_get_mrqc(sc->iov_mode);
545 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
546 1.99 msaitoh } /* ixgbe_initialize_rss_mapping */
547 1.1 dyoung
548 1.99 msaitoh /************************************************************************
549 1.99 msaitoh * ixgbe_initialize_receive_units - Setup receive registers and features.
550 1.99 msaitoh ************************************************************************/
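/*
 * Round the receive buffer size up to the next multiple of
 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes before converting it into
 * SRRCTL.BSIZEPKT units below.
 */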
551 1.98 msaitoh #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
552 1.185 msaitoh
553 1.1 dyoung static void
554 1.333 msaitoh ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
555 1.1 dyoung {
556 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
557 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
558 1.333 msaitoh struct ifnet *ifp = sc->ifp;
559 1.186 msaitoh int i, j;
560 1.98 msaitoh u32 bufsz, fctrl, srrctl, rxcsum;
561 1.98 msaitoh u32 hlreg;
562 1.98 msaitoh
563 1.98 msaitoh /*
564 1.98 msaitoh * Make sure receives are disabled while
565 1.98 msaitoh * setting up the descriptor ring
566 1.98 msaitoh */
567 1.98 msaitoh ixgbe_disable_rx(hw);
568 1.1 dyoung
569 1.98 msaitoh /* Enable broadcasts */
570 1.98 msaitoh fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
571 1.98 msaitoh fctrl |= IXGBE_FCTRL_BAM;
572 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB) {
573 1.98 msaitoh fctrl |= IXGBE_FCTRL_DPF;
574 1.98 msaitoh fctrl |= IXGBE_FCTRL_PMCF;
575 1.98 msaitoh }
576 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
577 1.1 dyoung
578 1.98 msaitoh /* Set for Jumbo Frames? */
579 1.98 msaitoh hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
580 1.98 msaitoh if (ifp->if_mtu > ETHERMTU)
581 1.98 msaitoh hlreg |= IXGBE_HLREG0_JUMBOEN;
582 1.98 msaitoh else
583 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_JUMBOEN;
584 1.99 msaitoh
585 1.98 msaitoh #ifdef DEV_NETMAP
586 1.99 msaitoh /* CRC stripping is conditional in Netmap */
587 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
588 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP) &&
589 1.99 msaitoh !ix_crcstrip)
590 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
591 1.60 msaitoh else
592 1.99 msaitoh #endif /* DEV_NETMAP */
593 1.98 msaitoh hlreg |= IXGBE_HLREG0_RXCRCSTRP;
594 1.99 msaitoh
595 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
596 1.1 dyoung
597 1.333 msaitoh bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
598 1.99 msaitoh IXGBE_SRRCTL_BSIZEPKT_SHIFT;
599 1.1 dyoung
600 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++) {
601 1.98 msaitoh u64 rdba = rxr->rxdma.dma_paddr;
602 1.152 msaitoh u32 reg;
603 1.98 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
 604 1.98 msaitoh int regshift = i % 4; /* 8 bits per 1 queue */
605 1.99 msaitoh j = rxr->me;
606 1.1 dyoung
607 1.98 msaitoh /* Setup the Base and Length of the Rx Descriptor Ring */
608 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
609 1.98 msaitoh (rdba & 0x00000000ffffffffULL));
610 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
611 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
612 1.333 msaitoh sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
613 1.1 dyoung
614 1.98 msaitoh /* Set up the SRRCTL register */
615 1.98 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
616 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
617 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
618 1.98 msaitoh srrctl |= bufsz;
619 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
620 1.47 msaitoh
621 1.98 msaitoh /* Set RQSMR (Receive Queue Statistic Mapping) register */
622 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
623 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
624 1.98 msaitoh reg |= i << (regshift * 8);
625 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
626 1.98 msaitoh
627 1.98 msaitoh /*
628 1.98 msaitoh * Set DROP_EN iff we have no flow control and >1 queue.
629 1.98 msaitoh * Note that srrctl was cleared shortly before during reset,
630 1.98 msaitoh * so we do not need to clear the bit, but do it just in case
631 1.98 msaitoh * this code is moved elsewhere.
632 1.98 msaitoh */
633 1.333 msaitoh if ((sc->num_queues > 1) &&
634 1.333 msaitoh (sc->hw.fc.requested_mode == ixgbe_fc_none))
635 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
636 1.319 msaitoh else
637 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
638 1.98 msaitoh
639 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
640 1.98 msaitoh
641 1.98 msaitoh /* Setup the HW Rx Head and Tail Descriptor Pointers */
642 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
643 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
644 1.98 msaitoh
645 1.98 msaitoh /* Set the driver rx tail address */
646 1.98 msaitoh rxr->tail = IXGBE_RDT(rxr->me);
647 1.98 msaitoh }
648 1.98 msaitoh
649 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
650 1.99 msaitoh u32 psrtype = IXGBE_PSRTYPE_TCPHDR
651 1.186 msaitoh | IXGBE_PSRTYPE_UDPHDR
652 1.186 msaitoh | IXGBE_PSRTYPE_IPV4HDR
653 1.186 msaitoh | IXGBE_PSRTYPE_IPV6HDR;
654 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
655 1.98 msaitoh }
656 1.98 msaitoh
657 1.98 msaitoh rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
658 1.98 msaitoh
659 1.333 msaitoh ixgbe_initialize_rss_mapping(sc);
660 1.98 msaitoh
661 1.333 msaitoh if (sc->num_queues > 1) {
662 1.98 msaitoh /* RSS and RX IPP Checksum are mutually exclusive */
663 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
664 1.98 msaitoh }
665 1.98 msaitoh
666 1.98 msaitoh if (ifp->if_capenable & IFCAP_RXCSUM)
667 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
668 1.98 msaitoh
669 1.98 msaitoh /* This is useful for calculating UDP/IP fragment checksums */
670 1.98 msaitoh if (!(rxcsum & IXGBE_RXCSUM_PCSD))
671 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_IPPCSE;
672 1.98 msaitoh
673 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
674 1.98 msaitoh
675 1.99 msaitoh } /* ixgbe_initialize_receive_units */
676 1.98 msaitoh
677 1.99 msaitoh /************************************************************************
678 1.99 msaitoh * ixgbe_initialize_transmit_units - Enable transmit units.
679 1.99 msaitoh ************************************************************************/
680 1.98 msaitoh static void
681 1.333 msaitoh ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
682 1.98 msaitoh {
683 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
684 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
685 1.144 msaitoh int i;
686 1.98 msaitoh
687 1.225 msaitoh INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
688 1.225 msaitoh
689 1.98 msaitoh /* Setup the Base and Length of the Tx Descriptor Ring */
690 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, txr++) {
691 1.99 msaitoh u64 tdba = txr->txdma.dma_paddr;
692 1.99 msaitoh u32 txctrl = 0;
693 1.152 msaitoh u32 tqsmreg, reg;
694 1.152 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
 695 1.152 msaitoh int regshift = i % 4; /* 8 bits per 1 queue */
696 1.99 msaitoh int j = txr->me;
697 1.98 msaitoh
698 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
699 1.98 msaitoh (tdba & 0x00000000ffffffffULL));
700 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
701 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
702 1.333 msaitoh sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
703 1.98 msaitoh
704 1.152 msaitoh /*
705 1.152 msaitoh * Set TQSMR (Transmit Queue Statistic Mapping) register.
706 1.152 msaitoh * Register location is different between 82598 and others.
707 1.152 msaitoh */
708 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
709 1.152 msaitoh tqsmreg = IXGBE_TQSMR(regnum);
710 1.152 msaitoh else
711 1.152 msaitoh tqsmreg = IXGBE_TQSM(regnum);
712 1.152 msaitoh reg = IXGBE_READ_REG(hw, tqsmreg);
713 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
714 1.152 msaitoh reg |= i << (regshift * 8);
715 1.152 msaitoh IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 1.152 msaitoh
717 1.98 msaitoh /* Setup the HW Tx Head and Tail descriptor pointers */
718 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 1.98 msaitoh
721 1.98 msaitoh /* Cache the tail address */
722 1.98 msaitoh txr->tail = IXGBE_TDT(j);
723 1.98 msaitoh
724 1.155 msaitoh txr->txr_no_space = false;
725 1.155 msaitoh
 726 1.345 msaitoh /* Disable relaxed ordering */
727 1.98 msaitoh /*
728 1.98 msaitoh * Note: for X550 series devices, these registers are actually
729 1.295 andvar * prefixed with TPH_ instead of DCA_, but the addresses and
730 1.98 msaitoh * fields remain the same.
731 1.98 msaitoh */
732 1.98 msaitoh switch (hw->mac.type) {
733 1.98 msaitoh case ixgbe_mac_82598EB:
734 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 1.98 msaitoh break;
736 1.98 msaitoh default:
737 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 1.98 msaitoh break;
739 1.98 msaitoh }
740 1.98 msaitoh txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
741 1.98 msaitoh switch (hw->mac.type) {
742 1.98 msaitoh case ixgbe_mac_82598EB:
743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
744 1.98 msaitoh break;
745 1.98 msaitoh default:
746 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
747 1.98 msaitoh break;
748 1.98 msaitoh }
749 1.98 msaitoh
750 1.98 msaitoh }
751 1.98 msaitoh
752 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
753 1.98 msaitoh u32 dmatxctl, rttdcs;
754 1.99 msaitoh
755 1.98 msaitoh dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
756 1.98 msaitoh dmatxctl |= IXGBE_DMATXCTL_TE;
757 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
758 1.98 msaitoh /* Disable arbiter to set MTQC */
759 1.98 msaitoh rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
760 1.98 msaitoh rttdcs |= IXGBE_RTTDCS_ARBDIS;
761 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC,
763 1.333 msaitoh ixgbe_get_mtqc(sc->iov_mode));
764 1.98 msaitoh rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
765 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
766 1.98 msaitoh }
767 1.99 msaitoh } /* ixgbe_initialize_transmit_units */
768 1.98 msaitoh
769 1.245 msaitoh static void
770 1.333 msaitoh ixgbe_quirks(struct ixgbe_softc *sc)
771 1.245 msaitoh {
772 1.333 msaitoh device_t dev = sc->dev;
773 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
774 1.245 msaitoh const char *vendor, *product;
775 1.245 msaitoh
776 1.248 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
777 1.248 msaitoh /*
778 1.248 msaitoh * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
779 1.248 msaitoh * MA10-ST0.
780 1.248 msaitoh */
781 1.248 msaitoh vendor = pmf_get_platform("system-vendor");
782 1.248 msaitoh product = pmf_get_platform("system-product");
783 1.245 msaitoh
784 1.248 msaitoh if ((vendor == NULL) || (product == NULL))
785 1.248 msaitoh return;
786 1.245 msaitoh
787 1.248 msaitoh if ((strcmp(vendor, "GIGABYTE") == 0) &&
788 1.248 msaitoh (strcmp(product, "MA10-ST0") == 0)) {
789 1.248 msaitoh aprint_verbose_dev(dev,
790 1.248 msaitoh "Enable SFP+ MOD_ABS inverse quirk\n");
791 1.333 msaitoh sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
792 1.248 msaitoh }
793 1.245 msaitoh }
794 1.245 msaitoh }
795 1.245 msaitoh
796 1.99 msaitoh /************************************************************************
797 1.99 msaitoh * ixgbe_attach - Device initialization routine
798 1.98 msaitoh *
799 1.99 msaitoh * Called when the driver is being loaded.
800 1.99 msaitoh * Identifies the type of hardware, allocates all resources
801 1.99 msaitoh * and initializes the hardware.
802 1.98 msaitoh *
803 1.99 msaitoh * return 0 on success, positive on failure
804 1.99 msaitoh ************************************************************************/
805 1.98 msaitoh static void
806 1.98 msaitoh ixgbe_attach(device_t parent, device_t dev, void *aux)
807 1.98 msaitoh {
808 1.333 msaitoh struct ixgbe_softc *sc;
809 1.98 msaitoh struct ixgbe_hw *hw;
810 1.186 msaitoh int error = -1;
811 1.98 msaitoh u32 ctrl_ext;
812 1.340 msaitoh u16 high, low, nvmreg, dev_caps;
813 1.99 msaitoh pcireg_t id, subid;
814 1.159 maxv const ixgbe_vendor_info_t *ent;
815 1.98 msaitoh struct pci_attach_args *pa = aux;
816 1.219 msaitoh bool unsupported_sfp = false;
817 1.98 msaitoh const char *str;
818 1.233 msaitoh char wqname[MAXCOMLEN];
819 1.99 msaitoh char buf[256];
820 1.98 msaitoh
821 1.98 msaitoh INIT_DEBUGOUT("ixgbe_attach: begin");
822 1.98 msaitoh
823 1.98 msaitoh /* Allocate, clear, and link in our adapter structure */
824 1.333 msaitoh sc = device_private(dev);
825 1.333 msaitoh sc->hw.back = sc;
826 1.333 msaitoh sc->dev = dev;
827 1.333 msaitoh hw = &sc->hw;
828 1.333 msaitoh sc->osdep.pc = pa->pa_pc;
829 1.333 msaitoh sc->osdep.tag = pa->pa_tag;
830 1.98 msaitoh if (pci_dma64_available(pa))
831 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat64;
832 1.98 msaitoh else
833 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat;
834 1.333 msaitoh sc->osdep.attached = false;
835 1.333 msaitoh sc->osdep.detaching = false;
836 1.98 msaitoh
837 1.98 msaitoh ent = ixgbe_lookup(pa);
838 1.98 msaitoh
839 1.98 msaitoh KASSERT(ent != NULL);
840 1.98 msaitoh
841 1.98 msaitoh aprint_normal(": %s, Version - %s\n",
842 1.98 msaitoh ixgbe_strings[ent->index], ixgbe_driver_version);
843 1.98 msaitoh
844 1.233 msaitoh /* Core Lock Init */
845 1.333 msaitoh IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
846 1.1 dyoung
847 1.233 msaitoh /* Set up the timer callout and workqueue */
848 1.333 msaitoh callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
849 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
850 1.333 msaitoh error = workqueue_create(&sc->timer_wq, wqname,
851 1.333 msaitoh ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
852 1.233 msaitoh IXGBE_TASKLET_WQ_FLAGS);
853 1.233 msaitoh if (error) {
854 1.233 msaitoh aprint_error_dev(dev,
855 1.233 msaitoh "could not create timer workqueue (%d)\n", error);
856 1.233 msaitoh goto err_out;
857 1.233 msaitoh }
858 1.1 dyoung
859 1.1 dyoung /* Determine hardware revision */
860 1.99 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
861 1.99 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
862 1.99 msaitoh
863 1.99 msaitoh hw->vendor_id = PCI_VENDOR(id);
864 1.99 msaitoh hw->device_id = PCI_PRODUCT(id);
865 1.99 msaitoh hw->revision_id =
866 1.99 msaitoh PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
867 1.99 msaitoh hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
868 1.99 msaitoh hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
869 1.99 msaitoh
870 1.248 msaitoh /* Set quirk flags */
871 1.333 msaitoh ixgbe_quirks(sc);
872 1.248 msaitoh
873 1.99 msaitoh /*
874 1.99 msaitoh * Make sure BUSMASTER is set
875 1.99 msaitoh */
876 1.99 msaitoh ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
877 1.99 msaitoh
878 1.99 msaitoh /* Do base PCI setup - map BAR0 */
879 1.333 msaitoh if (ixgbe_allocate_pci_resources(sc, pa)) {
880 1.99 msaitoh aprint_error_dev(dev, "Allocation of PCI resources failed\n");
881 1.99 msaitoh error = ENXIO;
882 1.99 msaitoh goto err_out;
883 1.99 msaitoh }
884 1.99 msaitoh
885 1.99 msaitoh /* let hardware know driver is loaded */
886 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
887 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
888 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
889 1.99 msaitoh
890 1.99 msaitoh /*
891 1.99 msaitoh * Initialize the shared code
892 1.99 msaitoh */
893 1.144 msaitoh if (ixgbe_init_shared_code(hw) != 0) {
894 1.319 msaitoh aprint_error_dev(dev,
895 1.319 msaitoh "Unable to initialize the shared code\n");
896 1.99 msaitoh error = ENXIO;
897 1.99 msaitoh goto err_out;
898 1.99 msaitoh }
899 1.1 dyoung
900 1.79 msaitoh switch (hw->mac.type) {
901 1.79 msaitoh case ixgbe_mac_82598EB:
902 1.79 msaitoh str = "82598EB";
903 1.79 msaitoh break;
904 1.79 msaitoh case ixgbe_mac_82599EB:
905 1.79 msaitoh str = "82599EB";
906 1.79 msaitoh break;
907 1.79 msaitoh case ixgbe_mac_X540:
908 1.79 msaitoh str = "X540";
909 1.79 msaitoh break;
910 1.79 msaitoh case ixgbe_mac_X550:
911 1.79 msaitoh str = "X550";
912 1.79 msaitoh break;
913 1.79 msaitoh case ixgbe_mac_X550EM_x:
914 1.246 msaitoh str = "X550EM X";
915 1.79 msaitoh break;
916 1.99 msaitoh case ixgbe_mac_X550EM_a:
917 1.99 msaitoh str = "X550EM A";
918 1.99 msaitoh break;
919 1.79 msaitoh default:
920 1.79 msaitoh str = "Unknown";
921 1.79 msaitoh break;
922 1.79 msaitoh }
923 1.79 msaitoh aprint_normal_dev(dev, "device %s\n", str);
924 1.79 msaitoh
925 1.99 msaitoh hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 1.99 msaitoh
927 1.99 msaitoh /* Pick up the 82599 settings */
928 1.292 msaitoh if (hw->mac.type != ixgbe_mac_82598EB)
929 1.99 msaitoh hw->phy.smart_speed = ixgbe_smart_speed;
930 1.292 msaitoh
931 1.292 msaitoh /* Set the right number of segments */
932 1.292 msaitoh KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
933 1.333 msaitoh sc->num_segs = IXGBE_SCATTER_DEFAULT;
934 1.99 msaitoh
935 1.172 msaitoh /* Ensure SW/FW semaphore is free */
936 1.172 msaitoh ixgbe_init_swfw_semaphore(hw);
937 1.172 msaitoh
938 1.113 msaitoh hw->mac.ops.set_lan_id(hw);
939 1.333 msaitoh ixgbe_init_device_features(sc);
940 1.99 msaitoh
941 1.333 msaitoh if (ixgbe_configure_interrupts(sc)) {
942 1.1 dyoung error = ENXIO;
943 1.1 dyoung goto err_out;
944 1.1 dyoung }
945 1.1 dyoung
946 1.99 msaitoh /* Allocate multicast array memory. */
947 1.333 msaitoh sc->mta = malloc(sizeof(*sc->mta) *
948 1.215 chs MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949 1.99 msaitoh
950 1.99 msaitoh /* Enable WoL (if supported) */
951 1.333 msaitoh ixgbe_check_wol_support(sc);
952 1.99 msaitoh
953 1.193 msaitoh /* Register for VLAN events */
954 1.333 msaitoh ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);
955 1.193 msaitoh
956 1.99 msaitoh /* Verify adapter fan is still functional (if applicable) */
957 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 1.99 msaitoh u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 1.333 msaitoh ixgbe_check_fan_failure(sc, esdp, FALSE);
960 1.99 msaitoh }
961 1.99 msaitoh
962 1.99 msaitoh /* Set an initial default flow control value */
963 1.99 msaitoh hw->fc.requested_mode = ixgbe_flow_control;
964 1.99 msaitoh
965 1.1 dyoung /* Do descriptor calc and sanity checks */
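	/*
	 * Each ring's total byte length must be a multiple of DBA_ALIGN,
	 * so the descriptor count must be a multiple of
	 * DBA_ALIGN / sizeof(descriptor); otherwise fall back to the
	 * defaults below.
	 */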
966 1.1 dyoung if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 1.1 dyoung ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 1.336 msaitoh aprint_error_dev(dev, "Invalid TX ring size (%d). "
969 1.336 msaitoh "It must be between %d and %d, "
970 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
971 1.336 msaitoh "Using default value of %d instead.\n",
972 1.336 msaitoh ixgbe_txd, MIN_TXD, MAX_TXD,
973 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
974 1.336 msaitoh DEFAULT_TXD);
975 1.333 msaitoh sc->num_tx_desc = DEFAULT_TXD;
976 1.1 dyoung } else
977 1.333 msaitoh sc->num_tx_desc = ixgbe_txd;
978 1.1 dyoung
979 1.1 dyoung if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 1.33 msaitoh ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 1.336 msaitoh aprint_error_dev(dev, "Invalid RX ring size (%d). "
982 1.336 msaitoh "It must be between %d and %d, "
983 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
984 1.336 msaitoh "Using default value of %d instead.\n",
985 1.336 msaitoh ixgbe_rxd, MIN_RXD, MAX_RXD,
986 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
987 1.336 msaitoh DEFAULT_RXD);
988 1.333 msaitoh sc->num_rx_desc = DEFAULT_RXD;
989 1.1 dyoung } else
990 1.333 msaitoh sc->num_rx_desc = ixgbe_rxd;
991 1.1 dyoung
992 1.313 msaitoh /* Sysctls for limiting the amount of work done in the taskqueues */
993 1.333 msaitoh sc->rx_process_limit
994 1.333 msaitoh = (ixgbe_rx_process_limit <= sc->num_rx_desc)
995 1.333 msaitoh ? ixgbe_rx_process_limit : sc->num_rx_desc;
996 1.333 msaitoh sc->tx_process_limit
997 1.333 msaitoh = (ixgbe_tx_process_limit <= sc->num_tx_desc)
998 1.333 msaitoh ? ixgbe_tx_process_limit : sc->num_tx_desc;
999 1.313 msaitoh
 1000 1.286 msaitoh /* Set the default upper limit for mbuf copying in rxeof */
1001 1.333 msaitoh sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
1002 1.286 msaitoh
1003 1.1 dyoung /* Allocate our TX/RX Queues */
1004 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1005 1.1 dyoung error = ENOMEM;
1006 1.1 dyoung goto err_out;
1007 1.1 dyoung }
1008 1.1 dyoung
1009 1.99 msaitoh hw->phy.reset_if_overtemp = TRUE;
1010 1.99 msaitoh error = ixgbe_reset_hw(hw);
1011 1.99 msaitoh hw->phy.reset_if_overtemp = FALSE;
1012 1.237 msaitoh if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1013 1.99 msaitoh error = IXGBE_SUCCESS;
1014 1.237 msaitoh else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1015 1.324 msaitoh aprint_error_dev(dev,
1016 1.324 msaitoh "Unsupported SFP+ module type was detected.\n");
1017 1.219 msaitoh unsupported_sfp = true;
1018 1.219 msaitoh error = IXGBE_SUCCESS;
1019 1.1 dyoung } else if (error) {
1020 1.282 msaitoh aprint_error_dev(dev,
1021 1.282 msaitoh "Hardware initialization failed(error = %d)\n", error);
1022 1.1 dyoung error = EIO;
1023 1.1 dyoung goto err_late;
1024 1.1 dyoung }
1025 1.1 dyoung
1026 1.1 dyoung /* Make sure we have a good EEPROM before we read from it */
1027 1.333 msaitoh if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1028 1.48 msaitoh aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1029 1.1 dyoung error = EIO;
1030 1.1 dyoung goto err_late;
1031 1.1 dyoung }
1032 1.1 dyoung
1033 1.88 msaitoh aprint_normal("%s:", device_xname(dev));
1034 1.88 msaitoh /* NVM Image Version */
1035 1.169 msaitoh high = low = 0;
1036 1.88 msaitoh switch (hw->mac.type) {
1037 1.300 msaitoh case ixgbe_mac_82598EB:
1038 1.300 msaitoh /*
1039 1.300 msaitoh * Print version from the dev starter version (0x29). The
 1040 1.300 msaitoh * Print the version from the dev starter version word (0x29). The
 1041 1.300 msaitoh * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1042 1.300 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 1.300 msaitoh if (nvmreg == 0xffff)
1044 1.300 msaitoh break;
1045 1.300 msaitoh high = (nvmreg >> 12) & 0x0f;
1046 1.300 msaitoh low = (nvmreg >> 4) & 0xff;
1047 1.300 msaitoh id = nvmreg & 0x0f;
1048 1.300 msaitoh /*
1049 1.300 msaitoh * The following output might not be correct. Some 82598 cards
 1050 1.300 msaitoh * have 0x1070 or 0x2090. The 82598 spec update mentions 2.9.0.
1051 1.300 msaitoh */
1052 1.300 msaitoh aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1053 1.300 msaitoh break;
1054 1.88 msaitoh case ixgbe_mac_X540:
1055 1.99 msaitoh case ixgbe_mac_X550EM_a:
1056 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1057 1.88 msaitoh if (nvmreg == 0xffff)
1058 1.88 msaitoh break;
1059 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1060 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1061 1.88 msaitoh id = nvmreg & 0x0f;
1062 1.107 msaitoh aprint_normal(" NVM Image Version %u.", high);
1063 1.107 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1064 1.107 msaitoh str = "%x";
1065 1.107 msaitoh else
1066 1.107 msaitoh str = "%02x";
1067 1.107 msaitoh aprint_normal(str, low);
1068 1.107 msaitoh aprint_normal(" ID 0x%x,", id);
1069 1.88 msaitoh break;
1070 1.88 msaitoh case ixgbe_mac_X550EM_x:
1071 1.88 msaitoh case ixgbe_mac_X550:
1072 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1073 1.88 msaitoh if (nvmreg == 0xffff)
1074 1.88 msaitoh break;
1075 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1076 1.88 msaitoh low = nvmreg & 0xff;
1077 1.107 msaitoh aprint_normal(" NVM Image Version %u.%02x,", high, low);
1078 1.88 msaitoh break;
1079 1.88 msaitoh default:
1080 1.88 msaitoh break;
1081 1.88 msaitoh }
1082 1.169 msaitoh hw->eeprom.nvm_image_ver_high = high;
1083 1.169 msaitoh hw->eeprom.nvm_image_ver_low = low;
1084 1.88 msaitoh
1085 1.88 msaitoh /* PHY firmware revision */
1086 1.88 msaitoh switch (hw->mac.type) {
1087 1.88 msaitoh case ixgbe_mac_X540:
1088 1.88 msaitoh case ixgbe_mac_X550:
1089 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1090 1.88 msaitoh if (nvmreg == 0xffff)
1091 1.88 msaitoh break;
1092 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1093 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1094 1.88 msaitoh id = nvmreg & 0x000f;
1095 1.114 msaitoh aprint_normal(" PHY FW Revision %u.", high);
1096 1.114 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1097 1.114 msaitoh str = "%x";
1098 1.114 msaitoh else
1099 1.114 msaitoh str = "%02x";
1100 1.114 msaitoh aprint_normal(str, low);
1101 1.114 msaitoh aprint_normal(" ID 0x%x,", id);
1102 1.88 msaitoh break;
1103 1.88 msaitoh default:
1104 1.88 msaitoh break;
1105 1.88 msaitoh }
1106 1.88 msaitoh
1107 1.88 msaitoh /* NVM Map version & OEM NVM Image version */
1108 1.88 msaitoh switch (hw->mac.type) {
1109 1.88 msaitoh case ixgbe_mac_X550:
1110 1.88 msaitoh case ixgbe_mac_X550EM_x:
1111 1.99 msaitoh case ixgbe_mac_X550EM_a:
1112 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1113 1.88 msaitoh if (nvmreg != 0xffff) {
1114 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1115 1.88 msaitoh low = nvmreg & 0x00ff;
1116 1.88 msaitoh aprint_normal(" NVM Map version %u.%02x,", high, low);
1117 1.88 msaitoh }
1118 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1119 1.107 msaitoh if (nvmreg != 0xffff) {
1120 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1121 1.88 msaitoh low = nvmreg & 0x00ff;
1122 1.88 msaitoh aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1123 1.88 msaitoh low);
1124 1.88 msaitoh }
1125 1.88 msaitoh break;
1126 1.88 msaitoh default:
1127 1.88 msaitoh break;
1128 1.88 msaitoh }
1129 1.88 msaitoh
1130 1.88 msaitoh /* Print the ETrackID */
1131 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1132 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1133 1.88 msaitoh aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1134 1.79 msaitoh
1135 1.307 msaitoh /* Printed Board Assembly number */
1136 1.307 msaitoh error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
1137 1.307 msaitoh aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
1138 1.307 msaitoh
1139 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
1140 1.333 msaitoh error = ixgbe_allocate_msix(sc, pa);
1141 1.119 msaitoh if (error) {
1142 1.119 msaitoh /* Free allocated queue structures first */
1143 1.333 msaitoh ixgbe_free_queues(sc);
1144 1.119 msaitoh
1145 1.119 msaitoh /* Fallback to legacy interrupt */
1146 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_MSI)
1147 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
1148 1.333 msaitoh sc->num_queues = 1;
1149 1.119 msaitoh
1150 1.119 msaitoh /* Allocate our TX/RX Queues again */
1151 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1152 1.119 msaitoh error = ENOMEM;
1153 1.119 msaitoh goto err_out;
1154 1.119 msaitoh }
1155 1.119 msaitoh }
1156 1.119 msaitoh }
1157 1.307 msaitoh
1158 1.169 msaitoh /* Recovery mode */
1159 1.333 msaitoh switch (sc->hw.mac.type) {
1160 1.169 msaitoh case ixgbe_mac_X550:
1161 1.169 msaitoh case ixgbe_mac_X550EM_x:
1162 1.169 msaitoh case ixgbe_mac_X550EM_a:
1163 1.169 msaitoh 		/* FW recovery mode is supported on NVM image version >= 2.00 */
1164 1.169 msaitoh if (hw->eeprom.nvm_image_ver_high >= 2) {
1165 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1166 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1167 1.169 msaitoh }
1168 1.169 msaitoh break;
1169 1.169 msaitoh default:
1170 1.169 msaitoh break;
1171 1.169 msaitoh }
1172 1.169 msaitoh
1173 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) == 0)
1174 1.333 msaitoh error = ixgbe_allocate_legacy(sc, pa);
1175 1.185 msaitoh if (error)
1176 1.99 msaitoh goto err_late;
1177 1.99 msaitoh
1178 1.119 msaitoh 	/* Admin workqueue for deferred link, SFP, multispeed fiber and PHY tasks */
1179 1.333 msaitoh mutex_init(&(sc)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1180 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1181 1.333 msaitoh error = workqueue_create(&sc->admin_wq, wqname,
1182 1.333 msaitoh ixgbe_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
1183 1.223 thorpej IXGBE_TASKLET_WQ_FLAGS);
1184 1.223 thorpej if (error) {
1185 1.223 thorpej aprint_error_dev(dev,
1186 1.233 msaitoh "could not create admin workqueue (%d)\n", error);
1187 1.223 thorpej goto err_out;
1188 1.223 thorpej }
1189 1.119 msaitoh
1190 1.99 msaitoh error = ixgbe_start_hw(hw);
1191 1.25 msaitoh switch (error) {
1192 1.25 msaitoh case IXGBE_ERR_EEPROM_VERSION:
1193 1.319 msaitoh aprint_error_dev(dev,
1194 1.319 msaitoh "This device is a pre-production adapter/"
1195 1.1 dyoung "LOM. Please be aware there may be issues associated "
1196 1.48 msaitoh "with your hardware.\nIf you are experiencing problems "
1197 1.1 dyoung "please contact your Intel or hardware representative "
1198 1.1 dyoung "who provided you with this hardware.\n");
1199 1.25 msaitoh break;
1200 1.25 msaitoh default:
1201 1.25 msaitoh break;
1202 1.1 dyoung }
1203 1.1 dyoung
1204 1.116 msaitoh /* Setup OS specific network interface */
1205 1.333 msaitoh if (ixgbe_setup_interface(dev, sc) != 0)
1206 1.116 msaitoh goto err_late;
1207 1.116 msaitoh
1208 1.110 msaitoh /*
1209 1.110 msaitoh 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+) cage
1210 1.110 msaitoh 	 * and a module inserted, phy.id is not an MII PHY ID but an SFF 8024 ID.
1211 1.110 msaitoh */
1212 1.110 msaitoh if (hw->phy.media_type == ixgbe_media_type_copper) {
1213 1.95 msaitoh uint16_t id1, id2;
1214 1.95 msaitoh int oui, model, rev;
1215 1.285 pgoyette char descr[MII_MAX_DESCR_LEN];
1216 1.95 msaitoh
1217 1.95 msaitoh id1 = hw->phy.id >> 16;
1218 1.95 msaitoh id2 = hw->phy.id & 0xffff;
1219 1.95 msaitoh oui = MII_OUI(id1, id2);
1220 1.95 msaitoh model = MII_MODEL(id2);
1221 1.95 msaitoh rev = MII_REV(id2);
1222 1.285 pgoyette mii_get_descr(descr, sizeof(descr), oui, model);
1223 1.285 pgoyette if (descr[0])
1224 1.299 msaitoh aprint_normal_dev(dev, "PHY: %s, rev. %d\n",
1225 1.299 msaitoh descr, rev);
1226 1.95 msaitoh else
1227 1.95 msaitoh aprint_normal_dev(dev,
1228 1.95 msaitoh "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1229 1.95 msaitoh oui, model, rev);
1230 1.95 msaitoh }
1231 1.95 msaitoh
1232 1.173 msaitoh /* Enable EEE power saving */
1233 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
1234 1.173 msaitoh hw->mac.ops.setup_eee(hw,
1235 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
1236 1.173 msaitoh
1237 1.52 msaitoh /* Enable power to the phy. */
1238 1.219 msaitoh if (!unsupported_sfp) {
1239 1.219 msaitoh /* Enable the optics for 82599 SFP+ fiber */
1240 1.219 msaitoh ixgbe_enable_tx_laser(hw);
1241 1.219 msaitoh
1242 1.219 msaitoh /*
1243 1.219 msaitoh 		 * XXX Currently, ixgbe_set_phy_power() supports only copper
1244 1.219 msaitoh 		 * PHYs, so the !unsupported_sfp test isn't strictly required here.
1245 1.219 msaitoh */
1246 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
1247 1.219 msaitoh }
1248 1.52 msaitoh
1249 1.1 dyoung /* Initialize statistics */
1250 1.333 msaitoh ixgbe_update_stats_counters(sc);
1251 1.1 dyoung
1252 1.98 msaitoh /* Check PCIE slot type/speed/width */
1253 1.333 msaitoh ixgbe_get_slot_info(sc);
1254 1.1 dyoung
1255 1.99 msaitoh /*
1256 1.99 msaitoh * Do time init and sysctl init here, but
1257 1.99 msaitoh * only on the first port of a bypass adapter.
1258 1.99 msaitoh */
1259 1.333 msaitoh ixgbe_bypass_init(sc);
1260 1.99 msaitoh
1261 1.99 msaitoh /* Set an initial dmac value */
1262 1.333 msaitoh sc->dmac = 0;
1263 1.99 msaitoh /* Set initial advertised speeds (if applicable) */
1264 1.333 msaitoh sc->advertise = ixgbe_get_default_advertise(sc);
1265 1.45 msaitoh
1266 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1267 1.99 msaitoh ixgbe_define_iov_schemas(dev, &error);
1268 1.44 msaitoh
1269 1.44 msaitoh /* Add sysctls */
1270 1.333 msaitoh ixgbe_add_device_sysctls(sc);
1271 1.333 msaitoh ixgbe_add_hw_stats(sc);
1272 1.44 msaitoh
1273 1.99 msaitoh /* For Netmap */
1274 1.333 msaitoh sc->init_locked = ixgbe_init_locked;
1275 1.333 msaitoh sc->stop_locked = ixgbe_stop_locked;
1276 1.99 msaitoh
1277 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
1278 1.333 msaitoh ixgbe_netmap_attach(sc);
1279 1.1 dyoung
1280 1.340 msaitoh 	/* Print the feature capability, feature enable and device capability flags */
1281 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
1282 1.99 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf);
1283 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
1284 1.99 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf);
1285 1.340 msaitoh if (ixgbe_get_device_caps(hw, &dev_caps) == 0) {
1286 1.340 msaitoh snprintb(buf, sizeof(buf), IXGBE_DEVICE_CAPS_FLAGS, dev_caps);
1287 1.340 msaitoh aprint_verbose_dev(dev, "device cap %s\n", buf);
1288 1.340 msaitoh }
1289 1.44 msaitoh
1290 1.44 msaitoh if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1291 1.333 msaitoh pmf_class_network_register(dev, sc->ifp);
1292 1.44 msaitoh else
1293 1.44 msaitoh aprint_error_dev(dev, "couldn't establish power handler\n");
1294 1.44 msaitoh
1295 1.169 msaitoh /* Init recovery mode timer and state variable */
1296 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1297 1.333 msaitoh sc->recovery_mode = 0;
1298 1.169 msaitoh
1299 1.169 msaitoh /* Set up the timer callout */
1300 1.333 msaitoh callout_init(&sc->recovery_mode_timer,
1301 1.169 msaitoh IXGBE_CALLOUT_FLAGS);
1302 1.235 msaitoh snprintf(wqname, sizeof(wqname), "%s-recovery",
1303 1.235 msaitoh device_xname(dev));
1304 1.333 msaitoh error = workqueue_create(&sc->recovery_mode_timer_wq,
1305 1.333 msaitoh wqname, ixgbe_handle_recovery_mode_timer, sc,
1306 1.233 msaitoh IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1307 1.233 msaitoh if (error) {
1308 1.233 msaitoh aprint_error_dev(dev, "could not create "
1309 1.233 msaitoh "recovery_mode_timer workqueue (%d)\n", error);
1310 1.233 msaitoh goto err_out;
1311 1.233 msaitoh }
1312 1.169 msaitoh
1313 1.169 msaitoh /* Start the task */
1314 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
1315 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
1316 1.169 msaitoh }
1317 1.169 msaitoh
1318 1.1 dyoung INIT_DEBUGOUT("ixgbe_attach: end");
1319 1.333 msaitoh sc->osdep.attached = true;
1320 1.98 msaitoh
1321 1.1 dyoung return;
1322 1.43 msaitoh
1323 1.1 dyoung err_late:
1324 1.333 msaitoh ixgbe_free_queues(sc);
1325 1.1 dyoung err_out:
1326 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1327 1.99 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1328 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1329 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
1330 1.333 msaitoh ixgbe_free_pci_resources(sc);
1331 1.333 msaitoh if (sc->mta != NULL)
1332 1.333 msaitoh free(sc->mta, M_DEVBUF);
1333 1.333 msaitoh mutex_destroy(&(sc)->admin_mtx); /* XXX appropriate order? */
1334 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
1335 1.99 msaitoh
1336 1.1 dyoung return;
1337 1.99 msaitoh } /* ixgbe_attach */
1338 1.1 dyoung
1339 1.99 msaitoh /************************************************************************
1340 1.99 msaitoh * ixgbe_check_wol_support
1341 1.99 msaitoh *
1342 1.99 msaitoh * Checks whether the adapter's ports are capable of
1343 1.99 msaitoh * Wake On LAN by reading the adapter's NVM.
1344 1.1 dyoung *
1345 1.99 msaitoh * Sets each port's hw->wol_enabled value depending
1346 1.99 msaitoh * on the value read here.
1347 1.99 msaitoh ************************************************************************/
1348 1.98 msaitoh static void
1349 1.333 msaitoh ixgbe_check_wol_support(struct ixgbe_softc *sc)
1350 1.98 msaitoh {
1351 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1352 1.186 msaitoh u16 dev_caps = 0;
1353 1.1 dyoung
1354 1.98 msaitoh /* Find out WoL support for port */
1355 1.333 msaitoh sc->wol_support = hw->wol_enabled = 0;
1356 1.98 msaitoh ixgbe_get_device_caps(hw, &dev_caps);
1357 1.98 msaitoh if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1358 1.98 msaitoh ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1359 1.99 msaitoh hw->bus.func == 0))
1360 1.333 msaitoh sc->wol_support = hw->wol_enabled = 1;
1361 1.98 msaitoh
1362 1.98 msaitoh /* Save initial wake up filter configuration */
1363 1.333 msaitoh sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1364 1.98 msaitoh
1365 1.98 msaitoh return;
1366 1.99 msaitoh } /* ixgbe_check_wol_support */
1367 1.98 msaitoh
1368 1.99 msaitoh /************************************************************************
1369 1.99 msaitoh * ixgbe_setup_interface
1370 1.98 msaitoh *
1371 1.99 msaitoh * Setup networking device structure and register an interface.
1372 1.99 msaitoh ************************************************************************/
1373 1.1 dyoung static int
1374 1.333 msaitoh ixgbe_setup_interface(device_t dev, struct ixgbe_softc *sc)
1375 1.1 dyoung {
1376 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
1377 1.98 msaitoh struct ifnet *ifp;
1378 1.1 dyoung
1379 1.98 msaitoh INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1380 1.1 dyoung
1381 1.333 msaitoh ifp = sc->ifp = &ec->ec_if;
1382 1.98 msaitoh strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1383 1.98 msaitoh ifp->if_baudrate = IF_Gbps(10);
1384 1.98 msaitoh ifp->if_init = ixgbe_init;
1385 1.98 msaitoh ifp->if_stop = ixgbe_ifstop;
1386 1.333 msaitoh ifp->if_softc = sc;
1387 1.98 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1388 1.98 msaitoh #ifdef IXGBE_MPSAFE
1389 1.112 ozaki ifp->if_extflags = IFEF_MPSAFE;
1390 1.98 msaitoh #endif
1391 1.98 msaitoh ifp->if_ioctl = ixgbe_ioctl;
1392 1.98 msaitoh #if __FreeBSD_version >= 1100045
1393 1.98 msaitoh /* TSO parameters */
1394 1.98 msaitoh ifp->if_hw_tsomax = 65518;
1395 1.98 msaitoh ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1396 1.98 msaitoh ifp->if_hw_tsomaxsegsize = 2048;
1397 1.98 msaitoh #endif
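	/*
	 * Unless legacy TX is enabled, if_transmit (ixgbe_mq_start) provides
	 * the multiqueue transmit path; if_start is always set below as the
	 * legacy transmit entry point.
	 */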
1398 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1399 1.99 msaitoh #if 0
1400 1.99 msaitoh ixgbe_start_locked = ixgbe_legacy_start_locked;
1401 1.99 msaitoh #endif
1402 1.99 msaitoh } else {
1403 1.99 msaitoh ifp->if_transmit = ixgbe_mq_start;
1404 1.99 msaitoh #if 0
1405 1.99 msaitoh ixgbe_start_locked = ixgbe_mq_start_locked;
1406 1.29 msaitoh #endif
1407 1.99 msaitoh }
1408 1.99 msaitoh ifp->if_start = ixgbe_legacy_start;
1409 1.333 msaitoh IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
1410 1.98 msaitoh IFQ_SET_READY(&ifp->if_snd);
1411 1.98 msaitoh
1412 1.284 riastrad if_initialize(ifp);
1413 1.333 msaitoh sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
1414 1.98 msaitoh /*
1415 1.98 msaitoh 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
1416 1.98 msaitoh 	 * used.
1417 1.98 msaitoh */
1418 1.98 msaitoh
1419 1.333 msaitoh sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1420 1.98 msaitoh
1421 1.98 msaitoh /*
1422 1.98 msaitoh * Tell the upper layer(s) we support long frames.
1423 1.98 msaitoh */
1424 1.98 msaitoh ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1425 1.98 msaitoh
1426 1.98 msaitoh /* Set capability flags */
1427 1.98 msaitoh ifp->if_capabilities |= IFCAP_RXCSUM
1428 1.186 msaitoh | IFCAP_TXCSUM
1429 1.186 msaitoh | IFCAP_TSOv4
1430 1.186 msaitoh | IFCAP_TSOv6;
1431 1.98 msaitoh ifp->if_capenable = 0;
1432 1.98 msaitoh
1433 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1434 1.186 msaitoh | ETHERCAP_VLAN_HWCSUM
1435 1.186 msaitoh | ETHERCAP_JUMBO_MTU
1436 1.186 msaitoh | ETHERCAP_VLAN_MTU;
1437 1.98 msaitoh
1438 1.98 msaitoh /* Enable the above capabilities by default */
1439 1.98 msaitoh ec->ec_capenable = ec->ec_capabilities;
1440 1.98 msaitoh
1441 1.347 yamaguch ether_ifattach(ifp, sc->hw.mac.addr);
1442 1.347 yamaguch aprint_normal_dev(dev, "Ethernet address %s\n",
1443 1.347 yamaguch ether_sprintf(sc->hw.mac.addr));
1444 1.347 yamaguch ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1445 1.347 yamaguch
1446 1.98 msaitoh /*
1447 1.99 msaitoh 	 * Don't turn this on by default. If VLANs are created on
1448 1.99 msaitoh 	 * another pseudo device (e.g. lagg), VLAN events are not
1449 1.99 msaitoh 	 * passed through, which breaks operation, but with HW
1450 1.99 msaitoh 	 * FILTER off it works. If you use VLANs directly on the
1451 1.99 msaitoh 	 * ixgbe driver, you can enable this and get full hardware
1452 1.99 msaitoh 	 * tag filtering.
1453 1.99 msaitoh */
1454 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1455 1.1 dyoung
1456 1.98 msaitoh /*
1457 1.98 msaitoh * Specify the media types supported by this adapter and register
1458 1.98 msaitoh * callbacks to update media and link information
1459 1.98 msaitoh */
1460 1.333 msaitoh ec->ec_ifmedia = &sc->media;
1461 1.333 msaitoh ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixgbe_media_change,
1462 1.333 msaitoh ixgbe_media_status, &sc->core_mtx);
1463 1.45 msaitoh
1464 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1465 1.333 msaitoh ixgbe_add_media_types(sc);
1466 1.49 msaitoh
1467 1.98 msaitoh /* Set autoselect media by default */
1468 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1469 1.1 dyoung
1470 1.156 ozaki if_register(ifp);
1471 1.156 ozaki
1472 1.98 msaitoh return (0);
1473 1.99 msaitoh } /* ixgbe_setup_interface */
1474 1.1 dyoung
1475 1.99 msaitoh /************************************************************************
1476 1.99 msaitoh * ixgbe_add_media_types
1477 1.99 msaitoh ************************************************************************/
1478 1.98 msaitoh static void
1479 1.333 msaitoh ixgbe_add_media_types(struct ixgbe_softc *sc)
1480 1.98 msaitoh {
1481 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1482 1.186 msaitoh u64 layer;
1483 1.1 dyoung
1484 1.333 msaitoh layer = sc->phy_layer;
1485 1.1 dyoung
1486 1.98 msaitoh #define ADD(mm, dd) \
1487 1.333 msaitoh ifmedia_add(&sc->media, IFM_ETHER | (mm), (dd), NULL);
1488 1.1 dyoung
1489 1.140 msaitoh ADD(IFM_NONE, 0);
1490 1.140 msaitoh
1491 1.98 msaitoh /* Media types with matching NetBSD media defines */
1492 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1493 1.98 msaitoh ADD(IFM_10G_T | IFM_FDX, 0);
1494 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1495 1.98 msaitoh ADD(IFM_1000_T | IFM_FDX, 0);
1496 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1497 1.98 msaitoh ADD(IFM_100_TX | IFM_FDX, 0);
1498 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1499 1.99 msaitoh ADD(IFM_10_T | IFM_FDX, 0);
1500 1.26 msaitoh
1501 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1502 1.319 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1503 1.98 msaitoh ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1504 1.1 dyoung
1505 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1506 1.98 msaitoh ADD(IFM_10G_LR | IFM_FDX, 0);
1507 1.319 msaitoh if (hw->phy.multispeed_fiber)
1508 1.98 msaitoh ADD(IFM_1000_LX | IFM_FDX, 0);
1509 1.98 msaitoh }
1510 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1511 1.98 msaitoh ADD(IFM_10G_SR | IFM_FDX, 0);
1512 1.319 msaitoh if (hw->phy.multispeed_fiber)
1513 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1514 1.319 msaitoh } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1515 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1516 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1517 1.98 msaitoh ADD(IFM_10G_CX4 | IFM_FDX, 0);
1518 1.1 dyoung
1519 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1520 1.98 msaitoh ADD(IFM_10G_KR | IFM_FDX, 0);
1521 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1522 1.180 msaitoh ADD(IFM_10G_KX4 | IFM_FDX, 0);
1523 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1524 1.98 msaitoh ADD(IFM_1000_KX | IFM_FDX, 0);
1525 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1526 1.99 msaitoh ADD(IFM_2500_KX | IFM_FDX, 0);
1527 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1528 1.103 msaitoh ADD(IFM_2500_T | IFM_FDX, 0);
1529 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1530 1.103 msaitoh ADD(IFM_5000_T | IFM_FDX, 0);
1531 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1532 1.208 msaitoh ADD(IFM_1000_BX10 | IFM_FDX, 0);
1533 1.98 msaitoh /* XXX no ifmedia_set? */
1534 1.185 msaitoh
1535 1.98 msaitoh ADD(IFM_AUTO, 0);
1536 1.98 msaitoh
1537 1.98 msaitoh #undef ADD
1538 1.99 msaitoh } /* ixgbe_add_media_types */
1539 1.1 dyoung
1540 1.99 msaitoh /************************************************************************
1541 1.99 msaitoh * ixgbe_is_sfp
1542 1.99 msaitoh ************************************************************************/
1543 1.99 msaitoh static inline bool
1544 1.99 msaitoh ixgbe_is_sfp(struct ixgbe_hw *hw)
1545 1.99 msaitoh {
1546 1.99 msaitoh switch (hw->mac.type) {
1547 1.99 msaitoh case ixgbe_mac_82598EB:
1548 1.99 msaitoh if (hw->phy.type == ixgbe_phy_nl)
1549 1.144 msaitoh return (TRUE);
1550 1.144 msaitoh return (FALSE);
1551 1.99 msaitoh case ixgbe_mac_82599EB:
1552 1.203 msaitoh case ixgbe_mac_X550EM_x:
1553 1.203 msaitoh case ixgbe_mac_X550EM_a:
1554 1.99 msaitoh switch (hw->mac.ops.get_media_type(hw)) {
1555 1.99 msaitoh case ixgbe_media_type_fiber:
1556 1.99 msaitoh case ixgbe_media_type_fiber_qsfp:
1557 1.144 msaitoh return (TRUE);
1558 1.99 msaitoh default:
1559 1.144 msaitoh return (FALSE);
1560 1.99 msaitoh }
1561 1.99 msaitoh default:
1562 1.144 msaitoh return (FALSE);
1563 1.99 msaitoh }
1564 1.99 msaitoh } /* ixgbe_is_sfp */
1565 1.99 msaitoh
1566 1.226 thorpej static void
1567 1.333 msaitoh ixgbe_schedule_admin_tasklet(struct ixgbe_softc *sc)
1568 1.226 thorpej {
1569 1.243 msaitoh
1570 1.333 msaitoh KASSERT(mutex_owned(&sc->admin_mtx));
1571 1.260 knakahar
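	/*
	 * Don't schedule anything while detaching.  admin_pending ensures the
	 * work is enqueued at most once while a previous request is still
	 * outstanding.
	 */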
1572 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
1573 1.333 msaitoh if (sc->admin_pending == 0)
1574 1.333 msaitoh workqueue_enqueue(sc->admin_wq,
1575 1.333 msaitoh &sc->admin_wc, NULL);
1576 1.333 msaitoh sc->admin_pending = 1;
1577 1.255 msaitoh }
1578 1.226 thorpej }
1579 1.226 thorpej
1580 1.99 msaitoh /************************************************************************
1581 1.99 msaitoh * ixgbe_config_link
1582 1.99 msaitoh ************************************************************************/
1583 1.98 msaitoh static void
1584 1.333 msaitoh ixgbe_config_link(struct ixgbe_softc *sc)
1585 1.98 msaitoh {
1586 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1587 1.186 msaitoh u32 autoneg, err = 0;
1588 1.233 msaitoh u32 task_requests = 0;
1589 1.186 msaitoh bool sfp, negotiate = false;
1590 1.1 dyoung
1591 1.98 msaitoh sfp = ixgbe_is_sfp(hw);
1592 1.1 dyoung
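	/*
	 * For SFP(+) media, module and multispeed-fiber handling is deferred
	 * to the admin workqueue; for other media the link is configured
	 * synchronously below.
	 */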
1593 1.185 msaitoh if (sfp) {
1594 1.99 msaitoh if (hw->phy.multispeed_fiber) {
1595 1.99 msaitoh ixgbe_enable_tx_laser(hw);
1596 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1597 1.99 msaitoh }
1598 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1599 1.260 knakahar
1600 1.333 msaitoh mutex_enter(&sc->admin_mtx);
1601 1.333 msaitoh sc->task_requests |= task_requests;
1602 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
1603 1.333 msaitoh mutex_exit(&sc->admin_mtx);
1604 1.98 msaitoh } else {
1605 1.333 msaitoh struct ifmedia *ifm = &sc->media;
1606 1.143 msaitoh
1607 1.98 msaitoh if (hw->mac.ops.check_link)
1608 1.333 msaitoh err = ixgbe_check_link(hw, &sc->link_speed,
1609 1.333 msaitoh &sc->link_up, FALSE);
1610 1.98 msaitoh if (err)
1611 1.144 msaitoh return;
1612 1.143 msaitoh
1613 1.143 msaitoh /*
1614 1.143 msaitoh 		 * If this is the first call (autoneg not yet set), get the
1615 1.143 msaitoh 		 * value for auto-negotiation.
1616 1.143 msaitoh */
1617 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
1618 1.143 msaitoh if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1619 1.143 msaitoh && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1620 1.186 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1621 1.99 msaitoh &negotiate);
1622 1.98 msaitoh if (err)
1623 1.144 msaitoh return;
1624 1.98 msaitoh if (hw->mac.ops.setup_link)
1625 1.186 msaitoh err = hw->mac.ops.setup_link(hw, autoneg,
1626 1.333 msaitoh sc->link_up);
1627 1.98 msaitoh }
1628 1.99 msaitoh } /* ixgbe_config_link */
1629 1.98 msaitoh
1630 1.99 msaitoh /************************************************************************
1631 1.99 msaitoh * ixgbe_update_stats_counters - Update board statistics counters.
1632 1.99 msaitoh ************************************************************************/
1633 1.98 msaitoh static void
1634 1.333 msaitoh ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1635 1.1 dyoung {
1636 1.333 msaitoh struct ifnet *ifp = sc->ifp;
1637 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1638 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1639 1.305 msaitoh u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
1640 1.304 msaitoh u64 total, total_missed_rx = 0;
1641 1.303 msaitoh uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
1642 1.186 msaitoh unsigned int queue_counters;
1643 1.176 msaitoh int i;
1644 1.44 msaitoh
1645 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
1646 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
1647 1.303 msaitoh
1648 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
1649 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
1650 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
1651 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
1652 1.44 msaitoh
1653 1.176 msaitoh /* 16 registers exist */
1654 1.333 msaitoh queue_counters = uimin(__arraycount(stats->qprc), sc->num_queues);
1655 1.176 msaitoh for (i = 0; i < queue_counters; i++) {
1656 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
1657 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
1658 1.329 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1659 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbrc[i],
1660 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) +
1661 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32));
1662 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbtc[i],
1663 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) +
1664 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32));
1665 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
1666 1.329 msaitoh } else {
1667 1.329 msaitoh /* 82598 */
1668 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]);
1669 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBTC(i), qbtc[i]);
1670 1.329 msaitoh }
1671 1.98 msaitoh }
1672 1.151 msaitoh
1673 1.175 msaitoh /* 8 registers exist */
1674 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1675 1.98 msaitoh uint32_t mp;
1676 1.44 msaitoh
1677 1.151 msaitoh /* MPC */
1678 1.98 msaitoh mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1679 1.98 msaitoh /* global total per queue */
1680 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpc[i], mp);
1681 1.98 msaitoh /* running comprehensive total for stats display */
1682 1.98 msaitoh total_missed_rx += mp;
1683 1.44 msaitoh
1684 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1685 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
1686 1.151 msaitoh
1687 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
1688 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
1689 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1690 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1691 1.319 msaitoh IXGBE_PXONRXCNT(i), pxonrxc[i]);
1692 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1693 1.319 msaitoh IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
1694 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1695 1.319 msaitoh IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
1696 1.151 msaitoh } else {
1697 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1698 1.319 msaitoh IXGBE_PXONRXC(i), pxonrxc[i]);
1699 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1700 1.319 msaitoh IXGBE_PXOFFRXC(i), pxoffrxc[i]);
1701 1.151 msaitoh }
1702 1.98 msaitoh }
1703 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
1704 1.44 msaitoh
1705 1.98 msaitoh /* Document says M[LR]FC are valid when link is up and 10Gbps */
1706 1.333 msaitoh if ((sc->link_active == LINK_STATE_UP)
1707 1.333 msaitoh && (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1708 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
1709 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
1710 1.98 msaitoh }
1711 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
1712 1.326 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LINK_DN_CNT, link_dn_cnt);
1713 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
1714 1.44 msaitoh
1715 1.98 msaitoh /* Hardware workaround, gprc counts missed packets */
1716 1.305 msaitoh IXGBE_EVC_ADD(&stats->gprc,
1717 1.305 msaitoh IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
1718 1.44 msaitoh
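	/*
	 * Link XON/XOFF pause frames are also counted by the good packet and
	 * octet counters; their total is subtracted from gotc, gptc, mptc and
	 * ptc64 below.
	 */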
1719 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
1720 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
1721 1.305 msaitoh total = lxontxc + lxofftxc;
1722 1.44 msaitoh
1723 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1724 1.305 msaitoh IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
1725 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
1726 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1727 1.280 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1728 1.305 msaitoh - total * ETHER_MIN_LEN);
1729 1.305 msaitoh IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
1730 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
1731 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
1732 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
1733 1.98 msaitoh } else {
1734 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
1735 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
1736 1.98 msaitoh /* 82598 only has a counter in the high register */
1737 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
1738 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
1739 1.305 msaitoh - total * ETHER_MIN_LEN);
1740 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
1741 1.98 msaitoh }
1742 1.44 msaitoh
1743 1.98 msaitoh /*
1744 1.98 msaitoh * Workaround: mprc hardware is incorrectly counting
1745 1.98 msaitoh * broadcasts, so for now we subtract those.
1746 1.98 msaitoh */
1747 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
1748 1.305 msaitoh IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
1749 1.305 msaitoh - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
1750 1.305 msaitoh
1751 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
1752 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
1753 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
1754 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
1755 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
1756 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
1757 1.305 msaitoh
1758 1.305 msaitoh IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
1759 1.305 msaitoh IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
1760 1.305 msaitoh IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
1761 1.305 msaitoh
1762 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
1763 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
1764 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
1765 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
1766 1.305 msaitoh
1767 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
1768 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
1769 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
1770 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
1771 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
1772 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
1773 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
1774 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
1775 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
1776 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
1777 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
1778 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
1779 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
1780 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
1781 1.98 msaitoh /* Only read FCOE on 82599 */
1782 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1783 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
1784 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
1785 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
1786 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
1787 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
1788 1.98 msaitoh }
1789 1.44 msaitoh
1790 1.44 msaitoh /*
1791 1.224 msaitoh * Fill out the OS statistics structure. Only RX errors are required
1792 1.224 msaitoh * here because all TX counters are incremented in the TX path and
1793 1.224 msaitoh * normal RX counters are prepared in ether_input().
1794 1.44 msaitoh */
1795 1.222 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1796 1.222 thorpej if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1797 1.298 msaitoh
1798 1.298 msaitoh /*
1799 1.298 msaitoh * Aggregate following types of errors as RX errors:
1800 1.298 msaitoh * - CRC error count,
1801 1.298 msaitoh * - illegal byte error count,
1802 1.298 msaitoh * - length error count,
1803 1.298 msaitoh * - undersized packets count,
1804 1.298 msaitoh * - fragmented packets count,
1805 1.298 msaitoh * - oversized packets count,
1806 1.298 msaitoh * - jabber count.
1807 1.298 msaitoh */
1808 1.298 msaitoh if_statadd_ref(nsr, if_ierrors,
1809 1.303 msaitoh crcerrs + illerrc + rlec + ruc + rfc + roc + rjc);
1810 1.298 msaitoh
1811 1.222 thorpej IF_STAT_PUTREF(ifp);
1812 1.99 msaitoh } /* ixgbe_update_stats_counters */
1813 1.1 dyoung
1814 1.99 msaitoh /************************************************************************
1815 1.99 msaitoh * ixgbe_add_hw_stats
1816 1.99 msaitoh *
1817 1.99 msaitoh * Add sysctl variables, one per statistic, to the system.
1818 1.99 msaitoh ************************************************************************/
1819 1.98 msaitoh static void
1820 1.333 msaitoh ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1821 1.1 dyoung {
1822 1.333 msaitoh device_t dev = sc->dev;
1823 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
1824 1.333 msaitoh struct sysctllog **log = &sc->sysctllog;
1825 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
1826 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
1827 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1828 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1829 1.98 msaitoh const char *xname = device_xname(dev);
1830 1.144 msaitoh int i;
1831 1.1 dyoung
1832 1.98 msaitoh /* Driver Statistics */
1833 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1834 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG");
1835 1.333 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1836 1.98 msaitoh NULL, xname, "m_defrag() failed");
1837 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1838 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG");
1839 1.333 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1840 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL");
1841 1.333 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
1842 1.98 msaitoh NULL, xname, "Driver tx dma hard fail other");
1843 1.333 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1844 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN");
1845 1.333 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1846 1.98 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM");
1847 1.333 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
1848 1.98 msaitoh NULL, xname, "Watchdog timeouts");
1849 1.333 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
1850 1.98 msaitoh NULL, xname, "TSO errors");
1851 1.333 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
1852 1.233 msaitoh NULL, xname, "Admin MSI-X IRQ Handled");
1853 1.333 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
1854 1.233 msaitoh NULL, xname, "Link event");
1855 1.333 msaitoh evcnt_attach_dynamic(&sc->mod_workev, EVCNT_TYPE_INTR,
1856 1.233 msaitoh NULL, xname, "SFP+ module event");
1857 1.333 msaitoh evcnt_attach_dynamic(&sc->msf_workev, EVCNT_TYPE_INTR,
1858 1.233 msaitoh NULL, xname, "Multispeed event");
1859 1.333 msaitoh evcnt_attach_dynamic(&sc->phy_workev, EVCNT_TYPE_INTR,
1860 1.233 msaitoh NULL, xname, "External PHY event");
1861 1.1 dyoung
1862 1.168 msaitoh 	/* The maximum number of traffic classes is 8 */
1863 1.168 msaitoh KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1864 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1865 1.333 msaitoh snprintf(sc->tcs[i].evnamebuf,
1866 1.333 msaitoh sizeof(sc->tcs[i].evnamebuf), "%s tc%d", xname, i);
1867 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
1868 1.168 msaitoh evcnt_attach_dynamic(&stats->mpc[i],
1869 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1870 1.168 msaitoh "RX Missed Packet Count");
1871 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1872 1.168 msaitoh evcnt_attach_dynamic(&stats->rnbc[i],
1873 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1874 1.333 msaitoh sc->tcs[i].evnamebuf,
1875 1.168 msaitoh "Receive No Buffers");
1876 1.168 msaitoh }
1877 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
1878 1.168 msaitoh evcnt_attach_dynamic(&stats->pxontxc[i],
1879 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1880 1.331 msaitoh "Priority XON Transmitted");
1881 1.168 msaitoh evcnt_attach_dynamic(&stats->pxofftxc[i],
1882 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1883 1.331 msaitoh "Priority XOFF Transmitted");
1884 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
1885 1.168 msaitoh evcnt_attach_dynamic(&stats->pxon2offc[i],
1886 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1887 1.333 msaitoh sc->tcs[i].evnamebuf,
1888 1.331 msaitoh "Priority XON to XOFF");
1889 1.330 msaitoh evcnt_attach_dynamic(&stats->pxonrxc[i],
1890 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1891 1.331 msaitoh "Priority XON Received");
1892 1.330 msaitoh evcnt_attach_dynamic(&stats->pxoffrxc[i],
1893 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1894 1.331 msaitoh "Priority XOFF Received");
1895 1.168 msaitoh }
1896 1.168 msaitoh }
1897 1.168 msaitoh
1898 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
1899 1.135 msaitoh #ifdef LRO
1900 1.135 msaitoh struct lro_ctrl *lro = &rxr->lro;
1901 1.327 msaitoh #endif
1902 1.135 msaitoh
1903 1.333 msaitoh snprintf(sc->queues[i].evnamebuf,
1904 1.333 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
1905 1.333 msaitoh snprintf(sc->queues[i].namebuf,
1906 1.333 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i);
1907 1.1 dyoung
1908 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
1909 1.319 msaitoh aprint_error_dev(dev,
1910 1.319 msaitoh "could not create sysctl root\n");
1911 1.98 msaitoh break;
1912 1.98 msaitoh }
1913 1.1 dyoung
1914 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode,
1915 1.98 msaitoh 0, CTLTYPE_NODE,
1916 1.333 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1917 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1918 1.98 msaitoh break;
1919 1.23 msaitoh
1920 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1921 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
1922 1.98 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1923 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler, 0,
1924 1.333 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1925 1.98 msaitoh break;
1926 1.1 dyoung
1927 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1928 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1929 1.98 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1930 1.98 msaitoh ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1931 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1932 1.98 msaitoh break;
1933 1.1 dyoung
1934 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1935 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1936 1.98 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1937 1.98 msaitoh ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1938 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1939 1.98 msaitoh break;
1940 1.1 dyoung
1941 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1942 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1943 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"),
1944 1.280 msaitoh ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1945 1.154 msaitoh CTL_CREATE, CTL_EOL) != 0)
1946 1.154 msaitoh break;
1947 1.154 msaitoh
1948 1.154 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1949 1.287 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1950 1.287 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"),
1951 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1952 1.287 msaitoh CTL_CREATE, CTL_EOL) != 0)
1953 1.287 msaitoh break;
1954 1.287 msaitoh
1955 1.287 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1956 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1957 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Head"),
1958 1.98 msaitoh ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1959 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1960 1.33 msaitoh break;
1961 1.98 msaitoh
1962 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1963 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1964 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"),
1965 1.98 msaitoh ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1966 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1967 1.28 msaitoh break;
1968 1.98 msaitoh
1969 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
1970 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue");
1971 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq,
1972 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
1973 1.327 msaitoh "Handled queue in softint");
1974 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
1975 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint");
1976 1.327 msaitoh if (i < __arraycount(stats->qbtc))
1977 1.327 msaitoh evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1978 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1979 1.328 msaitoh "Queue Bytes Transmitted (reg)");
1980 1.327 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1981 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1982 1.328 msaitoh "Queue Packets Transmitted (soft)");
1983 1.327 msaitoh if (i < __arraycount(stats->qptc))
1984 1.280 msaitoh evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1985 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1986 1.328 msaitoh "Queue Packets Transmitted (reg)");
1987 1.327 msaitoh #ifndef IXGBE_LEGACY_TX
1988 1.327 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1989 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1990 1.327 msaitoh "Packets dropped in pcq");
1991 1.327 msaitoh #endif
1992 1.327 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1993 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1994 1.327 msaitoh "TX Queue No Descriptor Available");
1995 1.327 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1996 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "TSO");
1997 1.327 msaitoh
1998 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1999 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2000 1.328 msaitoh "Queue Bytes Received (soft)");
2001 1.327 msaitoh if (i < __arraycount(stats->qbrc))
2002 1.280 msaitoh evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
2003 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2004 1.328 msaitoh "Queue Bytes Received (reg)");
2005 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2006 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2007 1.328 msaitoh "Queue Packets Received (soft)");
2008 1.327 msaitoh if (i < __arraycount(stats->qprc))
2009 1.327 msaitoh evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
2010 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2011 1.328 msaitoh "Queue Packets Received (reg)");
2012 1.327 msaitoh if ((i < __arraycount(stats->qprdc)) &&
2013 1.327 msaitoh (hw->mac.type >= ixgbe_mac_82599EB))
2014 1.151 msaitoh evcnt_attach_dynamic(&stats->qprdc[i],
2015 1.151 msaitoh EVCNT_TYPE_MISC, NULL,
2016 1.333 msaitoh sc->queues[i].evnamebuf,
2017 1.328 msaitoh "Queue Packets Received Drop");
2018 1.33 msaitoh
2019 1.290 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
2020 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
2021 1.98 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2022 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded");
2023 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2024 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
2025 1.98 msaitoh #ifdef LRO
2026 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2027 1.98 msaitoh CTLFLAG_RD, &lro->lro_queued, 0,
2028 1.98 msaitoh "LRO Queued");
2029 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2030 1.98 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0,
2031 1.98 msaitoh "LRO Flushed");
2032 1.98 msaitoh #endif /* LRO */
2033 1.1 dyoung }
2034 1.28 msaitoh
2035 1.99 msaitoh /* MAC stats get their own sub node */
2036 1.98 msaitoh
2037 1.98 msaitoh snprintf(stats->namebuf,
2038 1.98 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname);
2039 1.98 msaitoh
2040 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2041 1.98 msaitoh stats->namebuf, "rx csum offload - IP");
2042 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2043 1.98 msaitoh stats->namebuf, "rx csum offload - L4");
2044 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2045 1.98 msaitoh stats->namebuf, "rx csum offload - IP bad");
2046 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2047 1.98 msaitoh stats->namebuf, "rx csum offload - L4 bad");
2048 1.98 msaitoh evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
2049 1.98 msaitoh stats->namebuf, "Interrupt conditions zero");
2050 1.98 msaitoh evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
2051 1.98 msaitoh stats->namebuf, "Legacy interrupts");
2052 1.99 msaitoh
2053 1.98 msaitoh evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2054 1.98 msaitoh stats->namebuf, "CRC Errors");
2055 1.98 msaitoh evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2056 1.98 msaitoh stats->namebuf, "Illegal Byte Errors");
2057 1.98 msaitoh evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2058 1.98 msaitoh stats->namebuf, "Byte Errors");
2059 1.98 msaitoh evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2060 1.98 msaitoh stats->namebuf, "MAC Short Packets Discarded");
2061 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2062 1.98 msaitoh evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2063 1.98 msaitoh stats->namebuf, "Bad SFD");
2064 1.98 msaitoh evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2065 1.98 msaitoh stats->namebuf, "Total Packets Missed");
2066 1.98 msaitoh evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2067 1.98 msaitoh stats->namebuf, "MAC Local Faults");
2068 1.98 msaitoh evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2069 1.98 msaitoh stats->namebuf, "MAC Remote Faults");
2070 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2071 1.326 msaitoh evcnt_attach_dynamic(&stats->link_dn_cnt, EVCNT_TYPE_MISC,
2072 1.326 msaitoh NULL, stats->namebuf, "Link down event in the MAC");
2073 1.98 msaitoh evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2074 1.98 msaitoh stats->namebuf, "Receive Length Errors");
2075 1.98 msaitoh evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2076 1.98 msaitoh stats->namebuf, "Link XON Transmitted");
2077 1.330 msaitoh evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2078 1.330 msaitoh stats->namebuf, "Link XOFF Transmitted");
2079 1.98 msaitoh evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2080 1.98 msaitoh stats->namebuf, "Link XON Received");
2081 1.98 msaitoh evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2082 1.98 msaitoh stats->namebuf, "Link XOFF Received");
2083 1.98 msaitoh
2084 1.98 msaitoh /* Packet Reception Stats */
2085 1.98 msaitoh evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2086 1.98 msaitoh stats->namebuf, "Total Octets Received");
2087 1.98 msaitoh evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2088 1.98 msaitoh stats->namebuf, "Good Octets Received");
2089 1.98 msaitoh evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2090 1.98 msaitoh stats->namebuf, "Total Packets Received");
2091 1.98 msaitoh evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2092 1.98 msaitoh stats->namebuf, "Good Packets Received");
2093 1.98 msaitoh evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2094 1.98 msaitoh stats->namebuf, "Multicast Packets Received");
2095 1.98 msaitoh evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2096 1.98 msaitoh stats->namebuf, "Broadcast Packets Received");
2097 1.98 msaitoh evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2098 1.98 msaitoh stats->namebuf, "64 byte frames received ");
2099 1.98 msaitoh evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2100 1.98 msaitoh stats->namebuf, "65-127 byte frames received");
2101 1.98 msaitoh evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2102 1.98 msaitoh stats->namebuf, "128-255 byte frames received");
2103 1.98 msaitoh evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2104 1.98 msaitoh stats->namebuf, "256-511 byte frames received");
2105 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2106 1.98 msaitoh stats->namebuf, "512-1023 byte frames received");
2107 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2108 1.98 msaitoh 	    stats->namebuf, "1024-1522 byte frames received");
2109 1.98 msaitoh evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2110 1.98 msaitoh stats->namebuf, "Receive Undersized");
2111 1.98 msaitoh evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2112 1.98 msaitoh stats->namebuf, "Fragmented Packets Received ");
2113 1.98 msaitoh evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2114 1.98 msaitoh stats->namebuf, "Oversized Packets Received");
2115 1.98 msaitoh evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2116 1.98 msaitoh stats->namebuf, "Received Jabber");
2117 1.98 msaitoh evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2118 1.98 msaitoh stats->namebuf, "Management Packets Received");
2119 1.98 msaitoh evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2120 1.98 msaitoh stats->namebuf, "Management Packets Dropped");
2121 1.98 msaitoh evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2122 1.98 msaitoh stats->namebuf, "Checksum Errors");
2123 1.1 dyoung
2124 1.98 msaitoh /* Packet Transmission Stats */
2125 1.98 msaitoh evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2126 1.98 msaitoh stats->namebuf, "Good Octets Transmitted");
2127 1.98 msaitoh evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2128 1.98 msaitoh stats->namebuf, "Total Packets Transmitted");
2129 1.98 msaitoh evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2130 1.98 msaitoh stats->namebuf, "Good Packets Transmitted");
2131 1.98 msaitoh evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2132 1.98 msaitoh stats->namebuf, "Broadcast Packets Transmitted");
2133 1.98 msaitoh evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2134 1.98 msaitoh stats->namebuf, "Multicast Packets Transmitted");
2135 1.98 msaitoh evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2136 1.98 msaitoh stats->namebuf, "Management Packets Transmitted");
2137 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2138 1.98 msaitoh stats->namebuf, "64 byte frames transmitted ");
2139 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2140 1.98 msaitoh stats->namebuf, "65-127 byte frames transmitted");
2141 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2142 1.98 msaitoh stats->namebuf, "128-255 byte frames transmitted");
2143 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2144 1.98 msaitoh stats->namebuf, "256-511 byte frames transmitted");
2145 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2146 1.98 msaitoh stats->namebuf, "512-1023 byte frames transmitted");
2147 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2148 1.98 msaitoh stats->namebuf, "1024-1522 byte frames transmitted");
2149 1.99 msaitoh } /* ixgbe_add_hw_stats */
2150 1.48 msaitoh
2151 1.1 dyoung static void
2152 1.333 msaitoh ixgbe_clear_evcnt(struct ixgbe_softc *sc)
2153 1.1 dyoung {
2154 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
2155 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
2156 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2157 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
2158 1.168 msaitoh int i;
2159 1.98 msaitoh
2160 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
2161 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
2162 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
2163 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
2164 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
2165 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
2166 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
2167 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0);
2168 1.333 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0);
2169 1.333 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0);
2170 1.333 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0);
2171 1.333 msaitoh IXGBE_EVC_STORE(&sc->mod_workev, 0);
2172 1.333 msaitoh IXGBE_EVC_STORE(&sc->msf_workev, 0);
2173 1.333 msaitoh IXGBE_EVC_STORE(&sc->phy_workev, 0);
2174 1.98 msaitoh
2175 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2176 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
2177 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpc[i], 0);
2178 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
2179 1.305 msaitoh IXGBE_EVC_STORE(&stats->rnbc[i], 0);
2180 1.168 msaitoh }
2181 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
2182 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
2183 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
2184 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
2185 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
2186 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2187 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
2188 1.168 msaitoh }
2189 1.168 msaitoh }
2190 1.168 msaitoh
2191 1.333 msaitoh txr = sc->tx_rings;
2192 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2193 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
2194 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
2195 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0);
2196 1.305 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0);
2197 1.98 msaitoh #ifndef IXGBE_LEGACY_TX
2198 1.305 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0);
2199 1.45 msaitoh #endif
2200 1.327 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
2201 1.327 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0);
2202 1.134 msaitoh txr->q_efbig_tx_dma_setup = 0;
2203 1.134 msaitoh txr->q_mbuf_defrag_failed = 0;
2204 1.134 msaitoh txr->q_efbig2_tx_dma_setup = 0;
2205 1.134 msaitoh txr->q_einval_tx_dma_setup = 0;
2206 1.134 msaitoh txr->q_other_tx_dma_setup = 0;
2207 1.134 msaitoh txr->q_eagain_tx_dma_setup = 0;
2208 1.134 msaitoh txr->q_enomem_tx_dma_setup = 0;
2209 1.134 msaitoh txr->q_tso_err = 0;
2210 1.1 dyoung
2211 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
2212 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprc[i], 0);
2213 1.305 msaitoh IXGBE_EVC_STORE(&stats->qptc[i], 0);
2214 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbrc[i], 0);
2215 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbtc[i], 0);
2216 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2217 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprdc[i], 0);
2218 1.98 msaitoh }
2219 1.98 msaitoh
2220 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0);
2221 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
2222 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0);
2223 1.305 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
2224 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
2225 1.305 msaitoh }
2226 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0);
2227 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0);
2228 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
2229 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
2230 1.305 msaitoh IXGBE_EVC_STORE(&stats->intzero, 0);
2231 1.305 msaitoh IXGBE_EVC_STORE(&stats->legint, 0);
2232 1.305 msaitoh IXGBE_EVC_STORE(&stats->crcerrs, 0);
2233 1.305 msaitoh IXGBE_EVC_STORE(&stats->illerrc, 0);
2234 1.305 msaitoh IXGBE_EVC_STORE(&stats->errbc, 0);
2235 1.305 msaitoh IXGBE_EVC_STORE(&stats->mspdc, 0);
2236 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2237 1.305 msaitoh IXGBE_EVC_STORE(&stats->mbsdc, 0);
2238 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpctotal, 0);
2239 1.305 msaitoh IXGBE_EVC_STORE(&stats->mlfc, 0);
2240 1.305 msaitoh IXGBE_EVC_STORE(&stats->mrfc, 0);
2241 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2242 1.326 msaitoh IXGBE_EVC_STORE(&stats->link_dn_cnt, 0);
2243 1.305 msaitoh IXGBE_EVC_STORE(&stats->rlec, 0);
2244 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxontxc, 0);
2245 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxonrxc, 0);
2246 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxofftxc, 0);
2247 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxoffrxc, 0);
2248 1.98 msaitoh
2249 1.98 msaitoh /* Packet Reception Stats */
2250 1.305 msaitoh IXGBE_EVC_STORE(&stats->tor, 0);
2251 1.305 msaitoh IXGBE_EVC_STORE(&stats->gorc, 0);
2252 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpr, 0);
2253 1.305 msaitoh IXGBE_EVC_STORE(&stats->gprc, 0);
2254 1.305 msaitoh IXGBE_EVC_STORE(&stats->mprc, 0);
2255 1.305 msaitoh IXGBE_EVC_STORE(&stats->bprc, 0);
2256 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc64, 0);
2257 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc127, 0);
2258 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc255, 0);
2259 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc511, 0);
2260 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1023, 0);
2261 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1522, 0);
2262 1.305 msaitoh IXGBE_EVC_STORE(&stats->ruc, 0);
2263 1.305 msaitoh IXGBE_EVC_STORE(&stats->rfc, 0);
2264 1.305 msaitoh IXGBE_EVC_STORE(&stats->roc, 0);
2265 1.305 msaitoh IXGBE_EVC_STORE(&stats->rjc, 0);
2266 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngprc, 0);
2267 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngpdc, 0);
2268 1.305 msaitoh IXGBE_EVC_STORE(&stats->xec, 0);
2269 1.98 msaitoh
2270 1.98 msaitoh /* Packet Transmission Stats */
2271 1.305 msaitoh IXGBE_EVC_STORE(&stats->gotc, 0);
2272 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpt, 0);
2273 1.305 msaitoh IXGBE_EVC_STORE(&stats->gptc, 0);
2274 1.305 msaitoh IXGBE_EVC_STORE(&stats->bptc, 0);
2275 1.305 msaitoh IXGBE_EVC_STORE(&stats->mptc, 0);
2276 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngptc, 0);
2277 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc64, 0);
2278 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc127, 0);
2279 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc255, 0);
2280 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc511, 0);
2281 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1023, 0);
2282 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1522, 0);
2283 1.98 msaitoh }
2284 1.98 msaitoh
2285 1.99 msaitoh /************************************************************************
2286 1.99 msaitoh * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2287 1.99 msaitoh *
2288 1.99 msaitoh * Retrieves the TDH value from the hardware
2289 1.99 msaitoh ************************************************************************/
2290 1.185 msaitoh static int
2291 1.98 msaitoh ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2292 1.98 msaitoh {
2293 1.98 msaitoh struct sysctlnode node = *rnode;
2294 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2295 1.333 msaitoh struct ixgbe_softc *sc;
2296 1.98 msaitoh uint32_t val;
2297 1.98 msaitoh
2298 1.99 msaitoh if (!txr)
2299 1.99 msaitoh return (0);
2300 1.99 msaitoh
2301 1.333 msaitoh sc = txr->sc;
2302 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2303 1.169 msaitoh return (EPERM);
2304 1.169 msaitoh
2305 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->me));
2306 1.98 msaitoh node.sysctl_data = &val;
2307 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2308 1.99 msaitoh } /* ixgbe_sysctl_tdh_handler */
2309 1.98 msaitoh
2310 1.99 msaitoh /************************************************************************
2311 1.99 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2312 1.99 msaitoh *
2313 1.99 msaitoh * Retrieves the TDT value from the hardware
2314 1.99 msaitoh ************************************************************************/
2315 1.185 msaitoh static int
2316 1.98 msaitoh ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2317 1.98 msaitoh {
2318 1.98 msaitoh struct sysctlnode node = *rnode;
2319 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2320 1.333 msaitoh struct ixgbe_softc *sc;
2321 1.98 msaitoh uint32_t val;
2322 1.1 dyoung
2323 1.99 msaitoh if (!txr)
2324 1.99 msaitoh return (0);
2325 1.99 msaitoh
2326 1.333 msaitoh sc = txr->sc;
2327 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2328 1.169 msaitoh return (EPERM);
2329 1.169 msaitoh
2330 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->me));
2331 1.98 msaitoh node.sysctl_data = &val;
2332 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2333 1.99 msaitoh } /* ixgbe_sysctl_tdt_handler */
2334 1.45 msaitoh
2335 1.99 msaitoh /************************************************************************
2336 1.154 msaitoh * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2337 1.154 msaitoh * handler function
2338 1.154 msaitoh *
2339 1.154 msaitoh * Retrieves the next_to_check value
2340 1.154 msaitoh ************************************************************************/
2341 1.185 msaitoh static int
2342 1.154 msaitoh ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2343 1.154 msaitoh {
2344 1.154 msaitoh struct sysctlnode node = *rnode;
2345 1.154 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2346 1.333 msaitoh struct ixgbe_softc *sc;
2347 1.154 msaitoh uint32_t val;
2348 1.154 msaitoh
2349 1.154 msaitoh if (!rxr)
2350 1.154 msaitoh return (0);
2351 1.154 msaitoh
2352 1.333 msaitoh sc = rxr->sc;
2353 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2354 1.169 msaitoh return (EPERM);
2355 1.169 msaitoh
2356 1.154 msaitoh val = rxr->next_to_check;
2357 1.154 msaitoh node.sysctl_data = &val;
2358 1.154 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2359 1.154 msaitoh } /* ixgbe_sysctl_next_to_check_handler */
2360 1.154 msaitoh
2361 1.154 msaitoh /************************************************************************
2362 1.287 msaitoh  * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2363 1.287 msaitoh * handler function
2364 1.287 msaitoh *
2365 1.287 msaitoh * Retrieves the next_to_refresh value
2366 1.287 msaitoh ************************************************************************/
2367 1.287 msaitoh static int
2368 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2369 1.287 msaitoh {
2370 1.287 msaitoh struct sysctlnode node = *rnode;
2371 1.287 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2372 1.333 msaitoh struct ixgbe_softc *sc;
2373 1.287 msaitoh uint32_t val;
2374 1.287 msaitoh
2375 1.287 msaitoh if (!rxr)
2376 1.287 msaitoh return (0);
2377 1.287 msaitoh
2378 1.333 msaitoh sc = rxr->sc;
2379 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2380 1.287 msaitoh return (EPERM);
2381 1.287 msaitoh
2382 1.287 msaitoh val = rxr->next_to_refresh;
2383 1.287 msaitoh node.sysctl_data = &val;
2384 1.287 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2385 1.287 msaitoh } /* ixgbe_sysctl_next_to_refresh_handler */
2386 1.287 msaitoh
2387 1.287 msaitoh /************************************************************************
2388 1.99 msaitoh * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2389 1.99 msaitoh *
2390 1.99 msaitoh * Retrieves the RDH value from the hardware
2391 1.99 msaitoh ************************************************************************/
2392 1.185 msaitoh static int
2393 1.98 msaitoh ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2394 1.98 msaitoh {
2395 1.98 msaitoh struct sysctlnode node = *rnode;
2396 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2397 1.333 msaitoh struct ixgbe_softc *sc;
2398 1.98 msaitoh uint32_t val;
2399 1.1 dyoung
2400 1.99 msaitoh if (!rxr)
2401 1.99 msaitoh return (0);
2402 1.99 msaitoh
2403 1.333 msaitoh sc = rxr->sc;
2404 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2405 1.169 msaitoh return (EPERM);
2406 1.169 msaitoh
2407 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDH(rxr->me));
2408 1.98 msaitoh node.sysctl_data = &val;
2409 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2410 1.99 msaitoh } /* ixgbe_sysctl_rdh_handler */
2411 1.1 dyoung
2412 1.99 msaitoh /************************************************************************
2413 1.99 msaitoh * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2414 1.99 msaitoh *
2415 1.99 msaitoh * Retrieves the RDT value from the hardware
2416 1.99 msaitoh ************************************************************************/
2417 1.185 msaitoh static int
2418 1.98 msaitoh ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2419 1.98 msaitoh {
2420 1.98 msaitoh struct sysctlnode node = *rnode;
2421 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2422 1.333 msaitoh struct ixgbe_softc *sc;
2423 1.98 msaitoh uint32_t val;
2424 1.1 dyoung
2425 1.99 msaitoh if (!rxr)
2426 1.99 msaitoh return (0);
2427 1.99 msaitoh
2428 1.333 msaitoh sc = rxr->sc;
2429 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2430 1.169 msaitoh return (EPERM);
2431 1.169 msaitoh
2432 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDT(rxr->me));
2433 1.98 msaitoh node.sysctl_data = &val;
2434 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2435 1.99 msaitoh } /* ixgbe_sysctl_rdt_handler */
2436 1.1 dyoung
2437 1.193 msaitoh static int
2438 1.193 msaitoh ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2439 1.193 msaitoh {
2440 1.193 msaitoh struct ifnet *ifp = &ec->ec_if;
2441 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2442 1.193 msaitoh int rv;
2443 1.193 msaitoh
2444 1.193 msaitoh if (set)
2445 1.333 msaitoh rv = ixgbe_register_vlan(sc, vid);
2446 1.193 msaitoh else
2447 1.333 msaitoh rv = ixgbe_unregister_vlan(sc, vid);
2448 1.193 msaitoh
2449 1.200 msaitoh if (rv != 0)
2450 1.200 msaitoh return rv;
2451 1.200 msaitoh
2452 1.200 msaitoh /*
2453 1.200 msaitoh * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2454 1.200 msaitoh * or 0 to 1.
2455 1.200 msaitoh */
2456 1.200 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2457 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2458 1.200 msaitoh
2459 1.193 msaitoh return rv;
2460 1.193 msaitoh }
2461 1.193 msaitoh
2462 1.99 msaitoh /************************************************************************
2463 1.99 msaitoh * ixgbe_register_vlan
2464 1.99 msaitoh *
2465 1.99 msaitoh  *   Run via the vlan config EVENT, this enables us to use the
2466 1.99 msaitoh  *   HW Filter table since we can get the vlan id.  This
2467 1.99 msaitoh  *   just creates the entry in the soft version of the
2468 1.99 msaitoh  *   VFTA; init will repopulate the real table.
2469 1.99 msaitoh ************************************************************************/
2470 1.193 msaitoh static int
2471 1.333 msaitoh ixgbe_register_vlan(struct ixgbe_softc *sc, u16 vtag)
2472 1.98 msaitoh {
2473 1.98 msaitoh u16 index, bit;
2474 1.193 msaitoh int error;
2475 1.48 msaitoh
2476 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2477 1.193 msaitoh return EINVAL;
2478 1.1 dyoung
2479 1.333 msaitoh IXGBE_CORE_LOCK(sc);
2480 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2481 1.98 msaitoh bit = vtag & 0x1F;
2482 1.333 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit);
2483 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, true,
2484 1.193 msaitoh true);
2485 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2486 1.193 msaitoh if (error != 0)
2487 1.193 msaitoh error = EACCES;
2488 1.193 msaitoh
2489 1.193 msaitoh return error;
2490 1.99 msaitoh } /* ixgbe_register_vlan */
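/*
 * Illustration (not part of the driver): the shadow VFTA is an array of
 * 32-bit words, so a VLAN id is split into a word index and a bit
 * position exactly as in ixgbe_register_vlan() above.  For example,
 * vtag = 100 gives
 *
 *	index = (100 >> 5) & 0x7F = 3
 *	bit   = 100 & 0x1F        = 4
 *
 * so shadow_vfta[3] |= (u32)1 << 4; ixgbe_unregister_vlan() clears the
 * same bit again.
 */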
2491 1.1 dyoung
2492 1.99 msaitoh /************************************************************************
2493 1.99 msaitoh * ixgbe_unregister_vlan
2494 1.99 msaitoh *
2495 1.99 msaitoh * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2496 1.99 msaitoh ************************************************************************/
2497 1.193 msaitoh static int
2498 1.333 msaitoh ixgbe_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
2499 1.98 msaitoh {
2500 1.98 msaitoh u16 index, bit;
2501 1.193 msaitoh int error;
2502 1.1 dyoung
2503 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2504 1.193 msaitoh return EINVAL;
2505 1.1 dyoung
2506 1.333 msaitoh IXGBE_CORE_LOCK(sc);
2507 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2508 1.98 msaitoh bit = vtag & 0x1F;
2509 1.333 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit);
2510 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, false,
2511 1.193 msaitoh true);
2512 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2513 1.193 msaitoh if (error != 0)
2514 1.193 msaitoh error = EACCES;
2515 1.193 msaitoh
2516 1.193 msaitoh return error;
2517 1.99 msaitoh } /* ixgbe_unregister_vlan */
2518 1.98 msaitoh
2519 1.98 msaitoh static void
2520 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *sc)
2521 1.98 msaitoh {
2522 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2523 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2524 1.98 msaitoh struct rx_ring *rxr;
2525 1.200 msaitoh u32 ctrl;
2526 1.186 msaitoh int i;
2527 1.177 msaitoh bool hwtagging;
2528 1.98 msaitoh
2529 1.178 msaitoh /* Enable HW tagging only if any vlan is attached */
2530 1.177 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2531 1.178 msaitoh && VLAN_ATTACHED(ec);
2532 1.1 dyoung
2533 1.98 msaitoh /* Setup the queues for vlans */
2534 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
2535 1.333 msaitoh rxr = &sc->rx_rings[i];
2536 1.178 msaitoh /*
2537 1.178 msaitoh * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2538 1.178 msaitoh */
2539 1.177 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
2540 1.177 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2541 1.177 msaitoh if (hwtagging)
2542 1.115 msaitoh ctrl |= IXGBE_RXDCTL_VME;
2543 1.177 msaitoh else
2544 1.177 msaitoh ctrl &= ~IXGBE_RXDCTL_VME;
2545 1.177 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2546 1.98 msaitoh }
2547 1.177 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2548 1.1 dyoung }
2549 1.1 dyoung
2550 1.200 msaitoh /* VLAN hw tagging for 82598 */
2551 1.200 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
2552 1.200 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2553 1.200 msaitoh if (hwtagging)
2554 1.200 msaitoh ctrl |= IXGBE_VLNCTRL_VME;
2555 1.200 msaitoh else
2556 1.200 msaitoh ctrl &= ~IXGBE_VLNCTRL_VME;
2557 1.200 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2558 1.200 msaitoh }
2559 1.200 msaitoh } /* ixgbe_setup_vlan_hw_tagging */
2560 1.200 msaitoh
2561 1.200 msaitoh static void
2562 1.333 msaitoh ixgbe_setup_vlan_hw_support(struct ixgbe_softc *sc)
2563 1.200 msaitoh {
2564 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2565 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2566 1.200 msaitoh int i;
2567 1.200 msaitoh u32 ctrl;
2568 1.200 msaitoh struct vlanid_list *vlanidp;
2569 1.200 msaitoh
2570 1.200 msaitoh /*
2571 1.294 skrll * This function is called from both if_init and ifflags_cb()
2572 1.200 msaitoh * on NetBSD.
2573 1.200 msaitoh */
2574 1.200 msaitoh
2575 1.200 msaitoh /*
2576 1.200 msaitoh * Part 1:
2577 1.200 msaitoh * Setup VLAN HW tagging
2578 1.200 msaitoh */
2579 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2580 1.200 msaitoh
2581 1.200 msaitoh /*
2582 1.200 msaitoh * Part 2:
2583 1.200 msaitoh * Setup VLAN HW filter
2584 1.200 msaitoh */
2585 1.193 msaitoh /* Cleanup shadow_vfta */
2586 1.193 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2587 1.333 msaitoh sc->shadow_vfta[i] = 0;
2588 1.193 msaitoh /* Generate shadow_vfta from ec_vids */
2589 1.201 msaitoh ETHER_LOCK(ec);
2590 1.193 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2591 1.193 msaitoh uint32_t idx;
2592 1.193 msaitoh
2593 1.193 msaitoh idx = vlanidp->vid / 32;
2594 1.193 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE);
2595 1.333 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2596 1.193 msaitoh }
2597 1.201 msaitoh ETHER_UNLOCK(ec);
2598 1.99 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2599 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), sc->shadow_vfta[i]);
2600 1.22 msaitoh
2601 1.98 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2602 1.98 msaitoh /* Enable the Filter Table if enabled */
2603 1.177 msaitoh if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2604 1.98 msaitoh ctrl |= IXGBE_VLNCTRL_VFE;
2605 1.177 msaitoh else
2606 1.177 msaitoh ctrl &= ~IXGBE_VLNCTRL_VFE;
2607 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2608 1.99 msaitoh } /* ixgbe_setup_vlan_hw_support */
2609 1.1 dyoung
2610 1.99 msaitoh /************************************************************************
2611 1.99 msaitoh * ixgbe_get_slot_info
2612 1.99 msaitoh *
2613 1.99 msaitoh * Get the width and transaction speed of
2614 1.99 msaitoh * the slot this adapter is plugged into.
2615 1.99 msaitoh ************************************************************************/
2616 1.98 msaitoh static void
2617 1.333 msaitoh ixgbe_get_slot_info(struct ixgbe_softc *sc)
2618 1.98 msaitoh {
2619 1.333 msaitoh device_t dev = sc->dev;
2620 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2621 1.186 msaitoh u32 offset;
2622 1.98 msaitoh u16 link;
2623 1.186 msaitoh int bus_info_valid = TRUE;
2624 1.99 msaitoh
2625 1.99 msaitoh /* Some devices are behind an internal bridge */
2626 1.99 msaitoh switch (hw->device_id) {
2627 1.99 msaitoh case IXGBE_DEV_ID_82599_SFP_SF_QP:
2628 1.99 msaitoh case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2629 1.99 msaitoh goto get_parent_info;
2630 1.99 msaitoh default:
2631 1.99 msaitoh break;
2632 1.99 msaitoh }
2633 1.1 dyoung
2634 1.99 msaitoh ixgbe_get_bus_info(hw);
2635 1.99 msaitoh
2636 1.99 msaitoh /*
2637 1.99 msaitoh * Some devices don't use PCI-E, but there is no need
2638 1.99 msaitoh * to display "Unknown" for bus speed and width.
2639 1.99 msaitoh */
2640 1.99 msaitoh switch (hw->mac.type) {
2641 1.99 msaitoh case ixgbe_mac_X550EM_x:
2642 1.99 msaitoh case ixgbe_mac_X550EM_a:
2643 1.99 msaitoh return;
2644 1.99 msaitoh default:
2645 1.99 msaitoh goto display;
2646 1.1 dyoung }
2647 1.1 dyoung
2648 1.99 msaitoh get_parent_info:
2649 1.98 msaitoh /*
2650 1.99 msaitoh * For the Quad port adapter we need to parse back
2651 1.99 msaitoh * up the PCI tree to find the speed of the expansion
2652 1.99 msaitoh * slot into which this adapter is plugged. A bit more work.
2653 1.99 msaitoh */
2654 1.98 msaitoh dev = device_parent(device_parent(dev));
2655 1.99 msaitoh #if 0
2656 1.98 msaitoh #ifdef IXGBE_DEBUG
2657 1.99 msaitoh device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2658 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2659 1.98 msaitoh #endif
2660 1.98 msaitoh dev = device_parent(device_parent(dev));
2661 1.98 msaitoh #ifdef IXGBE_DEBUG
2662 1.99 msaitoh device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2663 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2664 1.99 msaitoh #endif
2665 1.1 dyoung #endif
2666 1.98 msaitoh /* Now get the PCI Express Capabilities offset */
2667 1.333 msaitoh if (pci_get_capability(sc->osdep.pc, sc->osdep.tag,
2668 1.99 msaitoh PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2669 1.99 msaitoh /*
2670 1.99 msaitoh * Hmm...can't get PCI-Express capabilities.
2671 1.99 msaitoh * Falling back to default method.
2672 1.99 msaitoh */
2673 1.99 msaitoh bus_info_valid = FALSE;
2674 1.99 msaitoh ixgbe_get_bus_info(hw);
2675 1.99 msaitoh goto display;
2676 1.99 msaitoh }
2677 1.98 msaitoh /* ...and read the Link Status Register */
2678 1.333 msaitoh link = pci_conf_read(sc->osdep.pc, sc->osdep.tag,
2679 1.120 msaitoh offset + PCIE_LCSR) >> 16;
2680 1.120 msaitoh ixgbe_set_pci_config_data_generic(hw, link);
2681 1.52 msaitoh
2682 1.98 msaitoh display:
2683 1.99 msaitoh device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2684 1.186 msaitoh ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2685 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2686 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2687 1.99 msaitoh "Unknown"),
2688 1.99 msaitoh ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2689 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2690 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2691 1.99 msaitoh "Unknown"));
2692 1.99 msaitoh
2693 1.99 msaitoh if (bus_info_valid) {
2694 1.99 msaitoh if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2695 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2696 1.99 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500))) {
2697 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2698 1.99 msaitoh " for this card\n is not sufficient for"
2699 1.99 msaitoh " optimal performance.\n");
2700 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2701 1.99 msaitoh "PCIE, or x4 PCIE Gen2 slot is required.\n");
2702 1.99 msaitoh }
2703 1.99 msaitoh if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2704 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2705 1.99 msaitoh (hw->bus.speed < ixgbe_bus_speed_8000))) {
2706 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2707 1.99 msaitoh " for this card\n is not sufficient for"
2708 1.99 msaitoh " optimal performance.\n");
2709 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2710 1.99 msaitoh "PCIE Gen3 slot is required.\n");
2711 1.99 msaitoh }
2712 1.99 msaitoh } else
2713 1.319 msaitoh device_printf(dev,
2714 1.319 msaitoh "Unable to determine slot speed/width. The speed/width "
2715 1.319 msaitoh "reported are that of the internal switch.\n");
2716 1.45 msaitoh
2717 1.45 msaitoh return;
2718 1.99 msaitoh } /* ixgbe_get_slot_info */
2719 1.1 dyoung
2720 1.99 msaitoh /************************************************************************
2721 1.321 msaitoh * ixgbe_enable_queue - Queue Interrupt Enabler
2722 1.99 msaitoh ************************************************************************/
2723 1.1 dyoung static inline void
2724 1.333 msaitoh ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
2725 1.1 dyoung {
2726 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2727 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2728 1.197 msaitoh u64 queue = 1ULL << vector;
2729 1.186 msaitoh u32 mask;
2730 1.1 dyoung
2731 1.139 knakahar mutex_enter(&que->dc_mtx);
2732 1.139 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0)
2733 1.127 knakahar goto out;
2734 1.127 knakahar
2735 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2736 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2737 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2738 1.1 dyoung } else {
2739 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2740 1.98 msaitoh if (mask)
2741 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2742 1.98 msaitoh mask = (queue >> 32);
2743 1.98 msaitoh if (mask)
2744 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2745 1.1 dyoung }
2746 1.127 knakahar out:
2747 1.139 knakahar mutex_exit(&que->dc_mtx);
2748 1.99 msaitoh } /* ixgbe_enable_queue */
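/*
 * Illustration (not part of the driver): on MACs other than 82598 the
 * per-queue enable bits span two 32-bit registers, so the 64-bit queue
 * mask built above is split.  For example, vector = 35 gives
 *
 *	queue = 1ULL << 35
 *	queue & 0xFFFFFFFF = 0		(nothing written to EIMS_EX(0))
 *	queue >> 32        = 1 << 3	(written to EIMS_EX(1))
 *
 * ixgbe_disable_queue_internal() below applies the same split to the
 * EIMC_EX registers.
 */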
2749 1.1 dyoung
2750 1.99 msaitoh /************************************************************************
2751 1.139 knakahar * ixgbe_disable_queue_internal
2752 1.99 msaitoh ************************************************************************/
2753 1.82 msaitoh static inline void
2754 1.333 msaitoh ixgbe_disable_queue_internal(struct ixgbe_softc *sc, u32 vector, bool nestok)
2755 1.1 dyoung {
2756 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2757 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2758 1.197 msaitoh u64 queue = 1ULL << vector;
2759 1.186 msaitoh u32 mask;
2760 1.1 dyoung
2761 1.139 knakahar mutex_enter(&que->dc_mtx);
2762 1.139 knakahar
2763 1.139 knakahar if (que->disabled_count > 0) {
2764 1.139 knakahar if (nestok)
2765 1.139 knakahar que->disabled_count++;
2766 1.139 knakahar goto out;
2767 1.139 knakahar }
2768 1.139 knakahar que->disabled_count++;
2769 1.127 knakahar
2770 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2771 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2772 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2773 1.1 dyoung } else {
2774 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2775 1.98 msaitoh if (mask)
2776 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2777 1.98 msaitoh mask = (queue >> 32);
2778 1.98 msaitoh if (mask)
2779 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2780 1.1 dyoung }
2781 1.127 knakahar out:
2782 1.139 knakahar mutex_exit(&que->dc_mtx);
2783 1.139 knakahar } /* ixgbe_disable_queue_internal */
2784 1.139 knakahar
2785 1.139 knakahar /************************************************************************
2786 1.139 knakahar * ixgbe_disable_queue
2787 1.139 knakahar ************************************************************************/
2788 1.139 knakahar static inline void
2789 1.333 msaitoh ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
2790 1.139 knakahar {
2791 1.139 knakahar
2792 1.333 msaitoh ixgbe_disable_queue_internal(sc, vector, true);
2793 1.99 msaitoh } /* ixgbe_disable_queue */
2794 1.1 dyoung
2795 1.99 msaitoh /************************************************************************
2796 1.133 knakahar * ixgbe_sched_handle_que - schedule deferred packet processing
2797 1.133 knakahar ************************************************************************/
2798 1.133 knakahar static inline void
2799 1.333 msaitoh ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
2800 1.133 knakahar {
2801 1.133 knakahar
2802 1.185 msaitoh if (que->txrx_use_workqueue) {
2803 1.133 knakahar /*
2804 1.333 msaitoh 		 * sc->que_wq is bound to each CPU instead of
2805 1.133 knakahar		 * each NIC queue to reduce the number of workqueue
2806 1.133 knakahar		 * kthreads. Since interrupt affinity must be considered
2807 1.133 knakahar		 * in this function, the workqueue kthread must be
2808 1.133 knakahar		 * WQ_PERCPU. If a WQ_PERCPU workqueue kthread were
2809 1.133 knakahar		 * created for each NIC queue, the number of created
2810 1.133 knakahar		 * kthreads would be (number of used NIC queues) *
2811 1.133 knakahar		 * (number of CPUs) = (number of CPUs) ^ 2 most often.
2812 1.133 knakahar		 *
2813 1.133 knakahar		 * Re-entry for the same NIC queue is prevented by
2814 1.133 knakahar		 * masking the queue's interrupt, and different NIC
2815 1.133 knakahar		 * queues use different struct work (que->wq_cookie),
2816 1.133 knakahar		 * so an "enqueued" flag to avoid calling
2817 1.133 knakahar		 * workqueue_enqueue() twice is not required.
2818 1.133 knakahar */
2819 1.333 msaitoh workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
2820 1.319 msaitoh } else
2821 1.133 knakahar softint_schedule(que->que_si);
2822 1.133 knakahar }
2823 1.133 knakahar
2824 1.133 knakahar /************************************************************************
2825 1.99 msaitoh * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2826 1.99 msaitoh ************************************************************************/
2827 1.34 msaitoh static int
2828 1.1 dyoung ixgbe_msix_que(void *arg)
2829 1.1 dyoung {
2830 1.1 dyoung struct ix_queue *que = arg;
2831 1.339 msaitoh struct ixgbe_softc *sc = que->sc;
2832 1.333 msaitoh struct ifnet *ifp = sc->ifp;
2833 1.1 dyoung struct tx_ring *txr = que->txr;
2834 1.1 dyoung struct rx_ring *rxr = que->rxr;
2835 1.1 dyoung u32 newitr = 0;
2836 1.1 dyoung
2837 1.33 msaitoh /* Protect against spurious interrupts */
2838 1.33 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
2839 1.34 msaitoh return 0;
2840 1.33 msaitoh
2841 1.333 msaitoh ixgbe_disable_queue(sc, que->msix);
2842 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
2843 1.1 dyoung
2844 1.147 knakahar /*
2845 1.147 knakahar * Don't change "que->txrx_use_workqueue" from this point to avoid
2846 1.147 knakahar * flip-flopping softint/workqueue mode in one deferred processing.
2847 1.147 knakahar */
2848 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
2849 1.147 knakahar
2850 1.1 dyoung IXGBE_TX_LOCK(txr);
2851 1.33 msaitoh ixgbe_txeof(txr);
2852 1.1 dyoung IXGBE_TX_UNLOCK(txr);
2853 1.1 dyoung
2854 1.1 dyoung /* Do AIM now? */
2855 1.1 dyoung
2856 1.333 msaitoh if (sc->enable_aim == false)
2857 1.1 dyoung goto no_calc;
2858 1.1 dyoung /*
2859 1.99 msaitoh * Do Adaptive Interrupt Moderation:
2860 1.99 msaitoh * - Write out last calculated setting
2861 1.99 msaitoh * - Calculate based on average size over
2862 1.99 msaitoh * the last interval.
2863 1.99 msaitoh */
2864 1.99 msaitoh if (que->eitr_setting)
2865 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
2866 1.99 msaitoh
2867 1.98 msaitoh que->eitr_setting = 0;
2868 1.1 dyoung
2869 1.98 msaitoh /* Idle, do nothing */
2870 1.186 msaitoh if ((txr->bytes == 0) && (rxr->bytes == 0))
2871 1.186 msaitoh goto no_calc;
2872 1.185 msaitoh
2873 1.1 dyoung if ((txr->bytes) && (txr->packets))
2874 1.98 msaitoh newitr = txr->bytes/txr->packets;
2875 1.1 dyoung if ((rxr->bytes) && (rxr->packets))
2876 1.165 riastrad newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2877 1.1 dyoung newitr += 24; /* account for hardware frame, crc */
2878 1.1 dyoung
2879 1.1 dyoung /* set an upper boundary */
2880 1.165 riastrad newitr = uimin(newitr, 3000);
2881 1.1 dyoung
2882 1.1 dyoung /* Be nice to the mid range */
2883 1.1 dyoung if ((newitr > 300) && (newitr < 1200))
2884 1.1 dyoung newitr = (newitr / 3);
2885 1.1 dyoung else
2886 1.1 dyoung newitr = (newitr / 2);
2887 1.1 dyoung
2888 1.124 msaitoh /*
2889 1.124 msaitoh * When RSC is used, ITR interval must be larger than RSC_DELAY.
2890 1.124 msaitoh * Currently, we use 2us for RSC_DELAY. The minimum value is always
2891 1.124 msaitoh * greater than 2us on 100M (and 10M?(not documented)), but it's not
2892 1.124 msaitoh * on 1G and higher.
2893 1.124 msaitoh */
2894 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
2895 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL))
2896 1.124 msaitoh if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2897 1.124 msaitoh newitr = IXGBE_MIN_RSC_EITR_10G1G;
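	/*
	 * Worked example (illustration only): with an average of 1500 bytes
	 * per packet on the TX ring and an idle RX ring, newitr becomes
	 * 1500 + 24 = 1524; it is below the 3000 cap and outside the
	 * 300-1200 "mid range", so it is halved to 762 (subject to the RSC
	 * minimum clamp above), saved in que->eitr_setting and written to
	 * EITR via ixgbe_eitr_write() on the next interrupt.
	 */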
2898 1.124 msaitoh
2899 1.186 msaitoh /* save for next interrupt */
2900 1.186 msaitoh que->eitr_setting = newitr;
2901 1.1 dyoung
2902 1.98 msaitoh /* Reset state */
2903 1.98 msaitoh txr->bytes = 0;
2904 1.98 msaitoh txr->packets = 0;
2905 1.98 msaitoh rxr->bytes = 0;
2906 1.98 msaitoh rxr->packets = 0;
2907 1.1 dyoung
2908 1.1 dyoung no_calc:
2909 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
2910 1.99 msaitoh
2911 1.34 msaitoh return 1;
2912 1.99 msaitoh } /* ixgbe_msix_que */
2913 1.1 dyoung
2914 1.99 msaitoh /************************************************************************
2915 1.99 msaitoh * ixgbe_media_status - Media Ioctl callback
2916 1.98 msaitoh *
2917 1.99 msaitoh * Called whenever the user queries the status of
2918 1.99 msaitoh * the interface using ifconfig.
2919 1.99 msaitoh ************************************************************************/
2920 1.98 msaitoh static void
2921 1.98 msaitoh ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2922 1.1 dyoung {
2923 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2924 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2925 1.98 msaitoh int layer;
2926 1.1 dyoung
2927 1.98 msaitoh INIT_DEBUGOUT("ixgbe_media_status: begin");
2928 1.333 msaitoh ixgbe_update_link_status(sc);
2929 1.1 dyoung
2930 1.1 dyoung ifmr->ifm_status = IFM_AVALID;
2931 1.1 dyoung ifmr->ifm_active = IFM_ETHER;
2932 1.1 dyoung
2933 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
2934 1.68 msaitoh ifmr->ifm_active |= IFM_NONE;
2935 1.1 dyoung return;
2936 1.1 dyoung }
2937 1.1 dyoung
2938 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE;
2939 1.333 msaitoh layer = sc->phy_layer;
2940 1.1 dyoung
2941 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2942 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2943 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2944 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2945 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2946 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2947 1.333 msaitoh switch (sc->link_speed) {
2948 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2949 1.43 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2950 1.43 msaitoh break;
2951 1.103 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
2952 1.103 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2953 1.103 msaitoh break;
2954 1.103 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
2955 1.103 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2956 1.103 msaitoh break;
2957 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2958 1.33 msaitoh ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2959 1.43 msaitoh break;
2960 1.43 msaitoh case IXGBE_LINK_SPEED_100_FULL:
2961 1.24 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2962 1.43 msaitoh break;
2963 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
2964 1.99 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2965 1.99 msaitoh break;
2966 1.43 msaitoh }
2967 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2968 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2969 1.333 msaitoh switch (sc->link_speed) {
2970 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2971 1.43 msaitoh ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2972 1.43 msaitoh break;
2973 1.43 msaitoh }
2974 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2975 1.333 msaitoh switch (sc->link_speed) {
2976 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2977 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2978 1.43 msaitoh break;
2979 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2980 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2981 1.43 msaitoh break;
2982 1.43 msaitoh }
2983 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2984 1.333 msaitoh switch (sc->link_speed) {
2985 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2986 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2987 1.43 msaitoh break;
2988 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2989 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2990 1.43 msaitoh break;
2991 1.43 msaitoh }
2992 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2993 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2994 1.333 msaitoh switch (sc->link_speed) {
2995 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2996 1.43 msaitoh ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2997 1.43 msaitoh break;
2998 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2999 1.28 msaitoh ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
3000 1.43 msaitoh break;
3001 1.43 msaitoh }
3002 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3003 1.333 msaitoh switch (sc->link_speed) {
3004 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3005 1.43 msaitoh ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
3006 1.43 msaitoh break;
3007 1.43 msaitoh }
3008 1.43 msaitoh /*
3009 1.99 msaitoh * XXX: These need to use the proper media types once
3010 1.99 msaitoh * they're added.
3011 1.99 msaitoh */
3012 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3013 1.333 msaitoh switch (sc->link_speed) {
3014 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3015 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
3016 1.48 msaitoh break;
3017 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3018 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3019 1.48 msaitoh break;
3020 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3021 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3022 1.48 msaitoh break;
3023 1.48 msaitoh }
3024 1.99 msaitoh else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
3025 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
3026 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3027 1.333 msaitoh switch (sc->link_speed) {
3028 1.48 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3029 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
3030 1.48 msaitoh break;
3031 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3032 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3033 1.48 msaitoh break;
3034 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3035 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3036 1.48 msaitoh break;
3037 1.48 msaitoh }
3038 1.98 msaitoh
3039 1.43 msaitoh /* If nothing is recognized... */
3040 1.43 msaitoh #if 0
3041 1.43 msaitoh if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
3042 1.43 msaitoh ifmr->ifm_active |= IFM_UNKNOWN;
3043 1.43 msaitoh #endif
3044 1.98 msaitoh
3045 1.104 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
3046 1.104 msaitoh
3047 1.44 msaitoh /* Display current flow control setting used on link */
3048 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
3049 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3050 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3051 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
3052 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3053 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3054 1.1 dyoung
3055 1.1 dyoung return;
3056 1.99 msaitoh } /* ixgbe_media_status */
3057 1.1 dyoung
3058 1.99 msaitoh /************************************************************************
3059 1.99 msaitoh * ixgbe_media_change - Media Ioctl callback
3060 1.1 dyoung *
3061 1.99 msaitoh * Called when the user changes speed/duplex using
3062 1.99 msaitoh  *   media/mediaopt option with ifconfig.
3063 1.99 msaitoh ************************************************************************/
3064 1.1 dyoung static int
3065 1.98 msaitoh ixgbe_media_change(struct ifnet *ifp)
3066 1.1 dyoung {
3067 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3068 1.333 msaitoh struct ifmedia *ifm = &sc->media;
3069 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3070 1.43 msaitoh ixgbe_link_speed speed = 0;
3071 1.94 msaitoh ixgbe_link_speed link_caps = 0;
3072 1.94 msaitoh bool negotiate = false;
3073 1.94 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
3074 1.1 dyoung
3075 1.1 dyoung INIT_DEBUGOUT("ixgbe_media_change: begin");
3076 1.1 dyoung
3077 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3078 1.1 dyoung return (EINVAL);
3079 1.1 dyoung
3080 1.44 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
3081 1.144 msaitoh return (EPERM);
3082 1.44 msaitoh
3083 1.43 msaitoh /*
3084 1.99 msaitoh * We don't actually need to check against the supported
3085 1.99 msaitoh * media types of the adapter; ifmedia will take care of
3086 1.99 msaitoh * that for us.
3087 1.99 msaitoh */
3088 1.43 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) {
3089 1.98 msaitoh case IFM_AUTO:
3090 1.98 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3091 1.98 msaitoh &negotiate);
3092 1.98 msaitoh if (err != IXGBE_SUCCESS) {
3093 1.333 msaitoh device_printf(sc->dev, "Unable to determine "
3094 1.98 msaitoh "supported advertise speeds\n");
3095 1.98 msaitoh return (ENODEV);
3096 1.98 msaitoh }
3097 1.98 msaitoh speed |= link_caps;
3098 1.98 msaitoh break;
3099 1.98 msaitoh case IFM_10G_T:
3100 1.98 msaitoh case IFM_10G_LRM:
3101 1.98 msaitoh case IFM_10G_LR:
3102 1.98 msaitoh case IFM_10G_TWINAX:
3103 1.181 msaitoh case IFM_10G_SR:
3104 1.181 msaitoh case IFM_10G_CX4:
3105 1.98 msaitoh case IFM_10G_KR:
3106 1.98 msaitoh case IFM_10G_KX4:
3107 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
3108 1.98 msaitoh break;
3109 1.103 msaitoh case IFM_5000_T:
3110 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
3111 1.103 msaitoh break;
3112 1.103 msaitoh case IFM_2500_T:
3113 1.99 msaitoh case IFM_2500_KX:
3114 1.99 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3115 1.99 msaitoh break;
3116 1.98 msaitoh case IFM_1000_T:
3117 1.98 msaitoh case IFM_1000_LX:
3118 1.98 msaitoh case IFM_1000_SX:
3119 1.98 msaitoh case IFM_1000_KX:
3120 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
3121 1.98 msaitoh break;
3122 1.98 msaitoh case IFM_100_TX:
3123 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
3124 1.98 msaitoh break;
3125 1.99 msaitoh case IFM_10_T:
3126 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
3127 1.99 msaitoh break;
3128 1.140 msaitoh case IFM_NONE:
3129 1.140 msaitoh break;
3130 1.98 msaitoh default:
3131 1.98 msaitoh goto invalid;
3132 1.48 msaitoh }
3133 1.43 msaitoh
3134 1.43 msaitoh hw->mac.autotry_restart = TRUE;
3135 1.43 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
3136 1.333 msaitoh sc->advertise = 0;
3137 1.109 msaitoh if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3138 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3139 1.333 msaitoh sc->advertise |= 1 << 2;
3140 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3141 1.333 msaitoh sc->advertise |= 1 << 1;
3142 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3143 1.333 msaitoh sc->advertise |= 1 << 0;
3144 1.99 msaitoh if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3145 1.333 msaitoh sc->advertise |= 1 << 3;
3146 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3147 1.333 msaitoh sc->advertise |= 1 << 4;
3148 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3149 1.333 msaitoh sc->advertise |= 1 << 5;
3150 1.51 msaitoh }
3151 1.1 dyoung
3152 1.1 dyoung return (0);
3153 1.43 msaitoh
3154 1.43 msaitoh invalid:
3155 1.333 msaitoh device_printf(sc->dev, "Invalid media type!\n");
3156 1.98 msaitoh
3157 1.43 msaitoh return (EINVAL);
3158 1.99 msaitoh } /* ixgbe_media_change */
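/*
 * Illustration (not part of the driver): the sc->advertise bits set in
 * ixgbe_media_change() above encode the selected speeds as
 *
 *	bit 0: 100M    bit 1: 1G     bit 2: 10G
 *	bit 3: 10M     bit 4: 2.5G   bit 5: 5G
 *
 * e.g. selecting IFM_10G_T yields advertise = 1 << 2.
 */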
3159 1.1 dyoung
3160 1.99 msaitoh /************************************************************************
3161 1.320 msaitoh * ixgbe_msix_admin - Link status change ISR (MSI-X)
3162 1.99 msaitoh ************************************************************************/
3163 1.98 msaitoh static int
3164 1.233 msaitoh ixgbe_msix_admin(void *arg)
3165 1.98 msaitoh {
3166 1.333 msaitoh struct ixgbe_softc *sc = arg;
3167 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3168 1.277 msaitoh u32 eicr;
3169 1.273 msaitoh u32 eims_orig;
3170 1.273 msaitoh u32 eims_disable = 0;
3171 1.98 msaitoh
3172 1.333 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1);
3173 1.98 msaitoh
3174 1.273 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3175 1.273 msaitoh /* Pause other interrupts */
3176 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3177 1.273 msaitoh
3178 1.125 knakahar /*
3179 1.273 msaitoh * First get the cause.
3180 1.273 msaitoh *
3181 1.125 knakahar	 * The specifications of 82598, 82599, X540 and X550 say the EICS
3182 1.125 knakahar	 * register is write-only. However, Linux reads EICS instead of EICR
3183 1.273 msaitoh 	 * to get the interrupt cause as a workaround for silicon errata.
3184 1.273 msaitoh 	 * At least, reading EICR clears the lower 16 bits of EIMS on 82598.
3185 1.125 knakahar */
3186 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3187 1.98 msaitoh /* Be sure the queue bits are not cleared */
3188 1.99 msaitoh eicr &= ~IXGBE_EICR_RTX_QUEUE;
3189 1.265 msaitoh /* Clear all OTHER interrupts with write */
3190 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3191 1.1 dyoung
3192 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
3193 1.277 msaitoh
3194 1.277 msaitoh /* Re-enable some OTHER interrupts */
3195 1.277 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3196 1.277 msaitoh
3197 1.277 msaitoh return 1;
3198 1.277 msaitoh } /* ixgbe_msix_admin */
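/*
 * Illustration (not part of the driver): the final EIMS write above
 * re-enables only the "other" interrupt sources that were not handed to
 * deferred work.  If, for example, a link status change was latched,
 * ixgbe_intr_admin_common() sets IXGBE_EIMS_LSC in eims_disable, so the
 * write becomes eims_orig & ~IXGBE_EIMS_LSC and LSC stays masked while
 * the scheduled admin task runs.
 */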
3199 1.277 msaitoh
3200 1.277 msaitoh static void
3201 1.333 msaitoh ixgbe_intr_admin_common(struct ixgbe_softc *sc, u32 eicr, u32 *eims_disable)
3202 1.277 msaitoh {
3203 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3204 1.277 msaitoh u32 task_requests = 0;
3205 1.277 msaitoh s32 retval;
3206 1.277 msaitoh
3207 1.266 msaitoh /* Link status change */
3208 1.266 msaitoh if (eicr & IXGBE_EICR_LSC) {
3209 1.266 msaitoh task_requests |= IXGBE_REQUEST_TASK_LSC;
3210 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3211 1.266 msaitoh }
3212 1.266 msaitoh
3213 1.204 msaitoh if (ixgbe_is_sfp(hw)) {
3214 1.310 msaitoh u32 eicr_mask;
3215 1.310 msaitoh
3216 1.204 msaitoh /* Pluggable optics-related interrupt */
3217 1.204 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
3218 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3219 1.204 msaitoh else
3220 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3221 1.204 msaitoh
3222 1.204 msaitoh /*
3223 1.204 msaitoh * An interrupt might not arrive when a module is inserted.
3224 1.204 msaitoh 		 * When a link status change interrupt occurs and the driver
3225 1.204 msaitoh 		 * still regards the SFP as unplugged, issue the module softint
3226 1.204 msaitoh 		 * and then issue the LSC interrupt.
3227 1.204 msaitoh */
3228 1.204 msaitoh if ((eicr & eicr_mask)
3229 1.204 msaitoh || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3230 1.204 msaitoh && (eicr & IXGBE_EICR_LSC))) {
3231 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD;
3232 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3233 1.204 msaitoh }
3234 1.204 msaitoh
3235 1.204 msaitoh if ((hw->mac.type == ixgbe_mac_82599EB) &&
3236 1.204 msaitoh (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3237 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF;
3238 1.277 msaitoh *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3239 1.204 msaitoh }
3240 1.204 msaitoh }
3241 1.204 msaitoh
3242 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3243 1.311 msaitoh #ifdef IXGBE_FDIR
3244 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3245 1.99 msaitoh (eicr & IXGBE_EICR_FLOW_DIR)) {
3246 1.333 msaitoh if (!atomic_cas_uint(&sc->fdir_reinit, 0, 1)) {
3247 1.275 msaitoh task_requests |= IXGBE_REQUEST_TASK_FDIR;
3248 1.275 msaitoh /* Disable the interrupt */
3249 1.277 msaitoh *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3250 1.275 msaitoh }
3251 1.99 msaitoh }
3252 1.311 msaitoh #endif
3253 1.99 msaitoh
3254 1.99 msaitoh if (eicr & IXGBE_EICR_ECC) {
3255 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3256 1.312 msaitoh &ixgbe_errlog_intrvl))
3257 1.333 msaitoh device_printf(sc->dev,
3258 1.312 msaitoh "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3259 1.98 msaitoh }
3260 1.1 dyoung
3261 1.98 msaitoh /* Check for over temp condition */
3262 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3263 1.333 msaitoh switch (sc->hw.mac.type) {
3264 1.99 msaitoh case ixgbe_mac_X550EM_a:
3265 1.99 msaitoh if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3266 1.99 msaitoh break;
3267 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3268 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3269 1.99 msaitoh break;
3270 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3271 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3272 1.333 msaitoh device_printf(sc->dev,
3273 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3274 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3275 1.333 msaitoh device_printf(sc->dev,
3276 1.312 msaitoh "System shutdown required!\n");
3277 1.312 msaitoh }
3278 1.99 msaitoh break;
3279 1.99 msaitoh default:
3280 1.99 msaitoh if (!(eicr & IXGBE_EICR_TS))
3281 1.99 msaitoh break;
3282 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3283 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3284 1.99 msaitoh break;
3285 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3286 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3287 1.333 msaitoh device_printf(sc->dev,
3288 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3289 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3290 1.333 msaitoh device_printf(sc->dev,
3291 1.312 msaitoh "System shutdown required!\n");
3292 1.312 msaitoh }
3293 1.99 msaitoh break;
3294 1.99 msaitoh }
3295 1.1 dyoung }
3296 1.99 msaitoh
3297 1.99 msaitoh /* Check for VF message */
3298 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3299 1.233 msaitoh (eicr & IXGBE_EICR_MAILBOX)) {
3300 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MBX;
3301 1.277 msaitoh *eims_disable |= IXGBE_EIMS_MAILBOX;
3302 1.233 msaitoh }
3303 1.1 dyoung }
3304 1.1 dyoung
3305 1.98 msaitoh /* Check for fan failure */
3306 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3307 1.333 msaitoh ixgbe_check_fan_failure(sc, eicr, true);
3308 1.1 dyoung
3309 1.98 msaitoh /* External PHY interrupt */
3310 1.99 msaitoh if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3311 1.99 msaitoh (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3312 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_PHY;
3313 1.277 msaitoh *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3314 1.233 msaitoh }
3315 1.233 msaitoh
3316 1.233 msaitoh if (task_requests != 0) {
3317 1.333 msaitoh mutex_enter(&sc->admin_mtx);
3318 1.333 msaitoh sc->task_requests |= task_requests;
3319 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
3320 1.333 msaitoh mutex_exit(&sc->admin_mtx);
3321 1.186 msaitoh }
3322 1.277 msaitoh }
3323 1.1 dyoung
3324 1.124 msaitoh static void
3325 1.333 msaitoh ixgbe_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
3326 1.124 msaitoh {
3327 1.185 msaitoh
3328 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
3329 1.186 msaitoh itr |= itr << 16;
3330 1.186 msaitoh else
3331 1.186 msaitoh itr |= IXGBE_EITR_CNT_WDIS;
3332 1.124 msaitoh
3333 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(index), itr);
3334 1.124 msaitoh }
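/*
 * Illustration (not part of the driver): with itr = 0x1f0,
 * ixgbe_eitr_write() programs EITR with 0x01f001f0 on 82598 (the value
 * is mirrored into the upper 16 bits) and with 0x1f0 | IXGBE_EITR_CNT_WDIS
 * on later MACs.
 */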
3335 1.124 msaitoh
3336 1.124 msaitoh
3337 1.99 msaitoh /************************************************************************
3338 1.99 msaitoh * ixgbe_sysctl_interrupt_rate_handler
3339 1.99 msaitoh ************************************************************************/
3340 1.98 msaitoh static int
3341 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3342 1.1 dyoung {
3343 1.98 msaitoh struct sysctlnode node = *rnode;
3344 1.99 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3345 1.339 msaitoh struct ixgbe_softc *sc;
3346 1.98 msaitoh uint32_t reg, usec, rate;
3347 1.98 msaitoh int error;
3348 1.45 msaitoh
3349 1.98 msaitoh if (que == NULL)
3350 1.98 msaitoh return 0;
3351 1.169 msaitoh
3352 1.333 msaitoh sc = que->sc;
3353 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
3354 1.169 msaitoh return (EPERM);
3355 1.169 msaitoh
3356 1.333 msaitoh reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
3357 1.98 msaitoh usec = ((reg & 0x0FF8) >> 3);
3358 1.98 msaitoh if (usec > 0)
3359 1.98 msaitoh rate = 500000 / usec;
3360 1.98 msaitoh else
3361 1.98 msaitoh rate = 0;
3362 1.98 msaitoh node.sysctl_data = &rate;
3363 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
3364 1.98 msaitoh if (error || newp == NULL)
3365 1.98 msaitoh return error;
3366 1.98 msaitoh reg &= ~0xfff; /* default, no limitation */
3367 1.98 msaitoh if (rate > 0 && rate < 500000) {
3368 1.98 msaitoh if (rate < 1000)
3369 1.98 msaitoh rate = 1000;
3370 1.228 msaitoh reg |= ((4000000 / rate) & 0xff8);
3371 1.124 msaitoh /*
3372 1.124 msaitoh * When RSC is used, ITR interval must be larger than
3373 1.124 msaitoh * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3374 1.124 msaitoh * The minimum value is always greater than 2us on 100M
3375 1.124 msaitoh * (and 10M?(not documented)), but it's not on 1G and higher.
3376 1.124 msaitoh */
3377 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
3378 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3379 1.333 msaitoh if ((sc->num_queues > 1)
3380 1.124 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3381 1.124 msaitoh return EINVAL;
3382 1.124 msaitoh }
3383 1.343 msaitoh sc->max_interrupt_rate = rate;
3384 1.124 msaitoh } else
3385 1.343 msaitoh sc->max_interrupt_rate = 0;
3386 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, reg);
3387 1.99 msaitoh
3388 1.99 msaitoh return (0);
3389 1.99 msaitoh } /* ixgbe_sysctl_interrupt_rate_handler */
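/*
 * Illustration (not part of the driver): the handler above converts
 * between an interrupt rate (interrupts/second) and the EITR interval
 * field covered by the 0x0ff8 mask.  Writing rate = 8000 gives
 *
 *	(4000000 / 8000) & 0xff8 = 500 & 0xff8 = 0x1f0
 *
 * and reading back yields usec = 0x1f0 >> 3 = 62, reported as
 * 500000 / 62 = 8064, so small rounding differences between the written
 * and the reported rate are expected.
 */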
3390 1.45 msaitoh
3391 1.98 msaitoh const struct sysctlnode *
3392 1.333 msaitoh ixgbe_sysctl_instance(struct ixgbe_softc *sc)
3393 1.98 msaitoh {
3394 1.98 msaitoh const char *dvname;
3395 1.98 msaitoh struct sysctllog **log;
3396 1.98 msaitoh int rc;
3397 1.98 msaitoh const struct sysctlnode *rnode;
3398 1.1 dyoung
3399 1.333 msaitoh if (sc->sysctltop != NULL)
3400 1.333 msaitoh return sc->sysctltop;
3401 1.1 dyoung
3402 1.333 msaitoh log = &sc->sysctllog;
3403 1.333 msaitoh dvname = device_xname(sc->dev);
3404 1.1 dyoung
3405 1.98 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3406 1.98 msaitoh 0, CTLTYPE_NODE, dvname,
3407 1.98 msaitoh SYSCTL_DESCR("ixgbe information and settings"),
3408 1.98 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3409 1.98 msaitoh goto err;
3410 1.63 msaitoh
3411 1.98 msaitoh return rnode;
3412 1.98 msaitoh err:
3413 1.333 msaitoh device_printf(sc->dev,
3414 1.207 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3415 1.98 msaitoh return NULL;
3416 1.63 msaitoh }
3417 1.63 msaitoh
3418 1.99 msaitoh /************************************************************************
3419 1.99 msaitoh * ixgbe_add_device_sysctls
3420 1.99 msaitoh ************************************************************************/
3421 1.63 msaitoh static void
3422 1.333 msaitoh ixgbe_add_device_sysctls(struct ixgbe_softc *sc)
3423 1.1 dyoung {
3424 1.333 msaitoh device_t dev = sc->dev;
3425 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3426 1.98 msaitoh struct sysctllog **log;
3427 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
3428 1.1 dyoung
3429 1.333 msaitoh log = &sc->sysctllog;
3430 1.1 dyoung
3431 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
3432 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl root\n");
3433 1.98 msaitoh return;
3434 1.98 msaitoh }
3435 1.1 dyoung
3436 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3437 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3438 1.158 msaitoh "debug", SYSCTL_DESCR("Debug Info"),
3439 1.333 msaitoh ixgbe_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL)
3440 1.280 msaitoh != 0)
3441 1.158 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3442 1.158 msaitoh
3443 1.158 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3444 1.286 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3445 1.286 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
3446 1.286 msaitoh ixgbe_sysctl_rx_copy_len, 0,
3447 1.333 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
3448 1.286 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3449 1.286 msaitoh
3450 1.286 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3451 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3452 1.314 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
3453 1.333 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3454 1.314 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3455 1.314 msaitoh
3456 1.314 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3457 1.314 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3458 1.314 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
3459 1.333 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3460 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3461 1.1 dyoung
3462 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3463 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
3464 1.313 msaitoh SYSCTL_DESCR("max number of RX packets to process"),
3465 1.333 msaitoh ixgbe_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3466 1.313 msaitoh CTL_EOL) != 0)
3467 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3468 1.313 msaitoh
3469 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3470 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
3471 1.313 msaitoh SYSCTL_DESCR("max number of TX packets to process"),
3472 1.333 msaitoh ixgbe_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3473 1.313 msaitoh CTL_EOL) != 0)
3474 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3475 1.313 msaitoh
3476 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3477 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3478 1.98 msaitoh "num_queues", SYSCTL_DESCR("Number of queues"),
3479 1.333 msaitoh NULL, 0, &sc->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3480 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3481 1.43 msaitoh
3482 1.98 msaitoh /* Sysctls for all devices */
3483 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3484 1.99 msaitoh CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3485 1.333 msaitoh ixgbe_sysctl_flowcntl, 0, (void *)sc, 0, CTL_CREATE,
3486 1.99 msaitoh CTL_EOL) != 0)
3487 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3488 1.63 msaitoh
3489 1.333 msaitoh sc->enable_aim = ixgbe_enable_aim;
3490 1.343 msaitoh sc->max_interrupt_rate = ixgbe_max_interrupt_rate;
3491 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3492 1.99 msaitoh CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3493 1.333 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3494 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3495 1.1 dyoung
3496 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3497 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3498 1.98 msaitoh "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3499 1.333 msaitoh ixgbe_sysctl_advertise, 0, (void *)sc, 0, CTL_CREATE,
3500 1.99 msaitoh CTL_EOL) != 0)
3501 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3502 1.1 dyoung
3503 1.147 knakahar /*
3504 1.147 knakahar	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3505 1.147 knakahar	 * handler, it would cause flip-flopping between softint and
3506 1.147 knakahar	 * workqueue mode within one deferred processing run. Therefore,
3507 1.147 knakahar	 * preempt_disable()/preempt_enable() would be required in
3508 1.147 knakahar	 * ixgbe_sched_handle_que() to avoid hitting the KASSERT in
3509 1.147 knakahar	 * softint_schedule(). Changing "que->txrx_use_workqueue" in the
3510 1.147 knakahar	 * interrupt handler is lighter than doing preempt_disable()/
3511 1.147 knakahar	 * preempt_enable() in every ixgbe_sched_handle_que().
3512 1.147 knakahar */
3513 1.333 msaitoh sc->txrx_use_workqueue = ixgbe_txrx_workqueue;
3514 1.128 knakahar if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3515 1.280 msaitoh CTLTYPE_BOOL, "txrx_workqueue",
3516 1.280 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"),
3517 1.333 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE,
3518 1.280 msaitoh CTL_EOL) != 0)
3519 1.128 knakahar aprint_error_dev(dev, "could not create sysctl\n");
3520 1.128 knakahar
3521 1.98 msaitoh #ifdef IXGBE_DEBUG
3522 1.98 msaitoh /* testing sysctls (for all devices) */
3523 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3524 1.99 msaitoh CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3525 1.333 msaitoh ixgbe_sysctl_power_state, 0, (void *)sc, 0, CTL_CREATE,
3526 1.99 msaitoh CTL_EOL) != 0)
3527 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3528 1.45 msaitoh
3529 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3530 1.99 msaitoh CTLTYPE_STRING, "print_rss_config",
3531 1.99 msaitoh SYSCTL_DESCR("Prints RSS Configuration"),
3532 1.333 msaitoh ixgbe_sysctl_print_rss_config, 0, (void *)sc, 0, CTL_CREATE,
3533 1.99 msaitoh CTL_EOL) != 0)
3534 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3535 1.98 msaitoh #endif
3536 1.98 msaitoh /* for X550 series devices */
3537 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3538 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3539 1.99 msaitoh CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3540 1.333 msaitoh ixgbe_sysctl_dmac, 0, (void *)sc, 0, CTL_CREATE,
3541 1.99 msaitoh CTL_EOL) != 0)
3542 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3543 1.1 dyoung
3544 1.98 msaitoh /* for WoL-capable devices */
3545 1.333 msaitoh if (sc->wol_support) {
3546 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3547 1.99 msaitoh CTLTYPE_BOOL, "wol_enable",
3548 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3549 1.333 msaitoh ixgbe_sysctl_wol_enable, 0, (void *)sc, 0, CTL_CREATE,
3550 1.99 msaitoh CTL_EOL) != 0)
3551 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3552 1.1 dyoung
3553 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3554 1.99 msaitoh CTLTYPE_INT, "wufc",
3555 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3556 1.333 msaitoh ixgbe_sysctl_wufc, 0, (void *)sc, 0, CTL_CREATE,
3557 1.99 msaitoh CTL_EOL) != 0)
3558 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3559 1.98 msaitoh }
3560 1.1 dyoung
3561 1.98 msaitoh /* for X552/X557-AT devices */
3562 1.325 msaitoh if ((hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) ||
3563 1.325 msaitoh (hw->device_id == IXGBE_DEV_ID_X550EM_A_10G_T)) {
3564 1.98 msaitoh const struct sysctlnode *phy_node;
3565 1.1 dyoung
3566 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3567 1.98 msaitoh "phy", SYSCTL_DESCR("External PHY sysctls"),
3568 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3569 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3570 1.98 msaitoh return;
3571 1.98 msaitoh }
3572 1.1 dyoung
3573 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3574 1.99 msaitoh CTLTYPE_INT, "temp",
3575 1.99 msaitoh SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3576 1.333 msaitoh ixgbe_sysctl_phy_temp, 0, (void *)sc, 0, CTL_CREATE,
3577 1.99 msaitoh CTL_EOL) != 0)
3578 1.99 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3579 1.99 msaitoh
3580 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3581 1.99 msaitoh CTLTYPE_INT, "overtemp_occurred",
3582 1.280 msaitoh SYSCTL_DESCR(
3583 1.280 msaitoh "External PHY High Temperature Event Occurred"),
3584 1.333 msaitoh ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)sc, 0,
3585 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0)
3586 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3587 1.99 msaitoh }
3588 1.33 msaitoh
3589 1.163 msaitoh if ((hw->mac.type == ixgbe_mac_X550EM_a)
3590 1.163 msaitoh && (hw->phy.type == ixgbe_phy_fw))
3591 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3592 1.163 msaitoh CTLTYPE_BOOL, "force_10_100_autonego",
3593 1.163 msaitoh SYSCTL_DESCR("Force autonego on 10M and 100M"),
3594 1.163 msaitoh NULL, 0, &hw->phy.force_10_100_autonego, 0,
3595 1.163 msaitoh CTL_CREATE, CTL_EOL) != 0)
3596 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3597 1.163 msaitoh
3598 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) {
3599 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3600 1.99 msaitoh CTLTYPE_INT, "eee_state",
3601 1.99 msaitoh SYSCTL_DESCR("EEE Power Save State"),
3602 1.333 msaitoh ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE,
3603 1.99 msaitoh CTL_EOL) != 0)
3604 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3605 1.98 msaitoh }
3606 1.99 msaitoh } /* ixgbe_add_device_sysctls */
3607 1.1 dyoung
3608 1.99 msaitoh /************************************************************************
3609 1.99 msaitoh * ixgbe_allocate_pci_resources
3610 1.99 msaitoh ************************************************************************/
3611 1.98 msaitoh static int
3612 1.333 msaitoh ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
3613 1.98 msaitoh const struct pci_attach_args *pa)
3614 1.1 dyoung {
3615 1.346 msaitoh pcireg_t memtype, csr;
3616 1.333 msaitoh device_t dev = sc->dev;
3617 1.98 msaitoh bus_addr_t addr;
3618 1.98 msaitoh int flags;
3619 1.1 dyoung
3620 1.98 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3621 1.98 msaitoh switch (memtype) {
3622 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3623 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3624 1.333 msaitoh sc->osdep.mem_bus_space_tag = pa->pa_memt;
3625 1.98 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3626 1.333 msaitoh memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
3627 1.98 msaitoh goto map_err;
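		/*
		 * BAR0 is register space: a prefetchable mapping would
		 * permit speculative reads and write combining, so make
		 * sure the flag is clear before mapping.
		 */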
3628 1.98 msaitoh if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3629 1.98 msaitoh aprint_normal_dev(dev, "clearing prefetchable bit\n");
3630 1.98 msaitoh flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3631 1.98 msaitoh }
3632 1.333 msaitoh if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
3633 1.333 msaitoh sc->osdep.mem_size, flags,
3634 1.333 msaitoh &sc->osdep.mem_bus_space_handle) != 0) {
3635 1.98 msaitoh map_err:
3636 1.333 msaitoh sc->osdep.mem_size = 0;
3637 1.98 msaitoh aprint_error_dev(dev, "unable to map BAR0\n");
3638 1.98 msaitoh return ENXIO;
3639 1.98 msaitoh }
3640 1.171 msaitoh /*
3641 1.171 msaitoh * Enable address decoding for the memory range in case the
3642 1.171 msaitoh * BIOS or UEFI didn't set it.
3643 1.171 msaitoh */
3644 1.171 msaitoh csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3645 1.171 msaitoh PCI_COMMAND_STATUS_REG);
3646 1.171 msaitoh csr |= PCI_COMMAND_MEM_ENABLE;
3647 1.171 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3648 1.171 msaitoh csr);
3649 1.98 msaitoh break;
3650 1.98 msaitoh default:
3651 1.98 msaitoh aprint_error_dev(dev, "unexpected type on BAR0\n");
3652 1.98 msaitoh return ENXIO;
3653 1.98 msaitoh }
3654 1.1 dyoung
3655 1.98 msaitoh return (0);
3656 1.99 msaitoh } /* ixgbe_allocate_pci_resources */
3657 1.1 dyoung
3658 1.119 msaitoh static void
3659 1.333 msaitoh ixgbe_free_deferred_handlers(struct ixgbe_softc *sc)
3660 1.119 msaitoh {
3661 1.333 msaitoh struct ix_queue *que = sc->queues;
3662 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3663 1.119 msaitoh int i;
3664 1.119 msaitoh
3665 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) {
3666 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3667 1.119 msaitoh if (txr->txr_si != NULL)
3668 1.119 msaitoh softint_disestablish(txr->txr_si);
3669 1.119 msaitoh }
3670 1.119 msaitoh if (que->que_si != NULL)
3671 1.119 msaitoh softint_disestablish(que->que_si);
3672 1.119 msaitoh }
3673 1.333 msaitoh if (sc->txr_wq != NULL)
3674 1.333 msaitoh workqueue_destroy(sc->txr_wq);
3675 1.333 msaitoh if (sc->txr_wq_enqueued != NULL)
3676 1.333 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
3677 1.333 msaitoh if (sc->que_wq != NULL)
3678 1.333 msaitoh workqueue_destroy(sc->que_wq);
3679 1.333 msaitoh
3680 1.333 msaitoh if (sc->admin_wq != NULL) {
3681 1.333 msaitoh workqueue_destroy(sc->admin_wq);
3682 1.333 msaitoh sc->admin_wq = NULL;
3683 1.333 msaitoh }
3684 1.333 msaitoh if (sc->timer_wq != NULL) {
3685 1.333 msaitoh workqueue_destroy(sc->timer_wq);
3686 1.333 msaitoh sc->timer_wq = NULL;
3687 1.233 msaitoh }
3688 1.333 msaitoh if (sc->recovery_mode_timer_wq != NULL) {
3689 1.236 msaitoh /*
3690 1.236 msaitoh * ixgbe_ifstop() doesn't call workqueue_wait() for
3691 1.236 msaitoh * the recovery_mode_timer workqueue, so call it here.
3692 1.236 msaitoh */
3693 1.333 msaitoh workqueue_wait(sc->recovery_mode_timer_wq,
3694 1.333 msaitoh &sc->recovery_mode_timer_wc);
3695 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
3696 1.333 msaitoh workqueue_destroy(sc->recovery_mode_timer_wq);
3697 1.333 msaitoh sc->recovery_mode_timer_wq = NULL;
3698 1.119 msaitoh }
3699 1.257 msaitoh } /* ixgbe_free_deferred_handlers */
3700 1.119 msaitoh
3701 1.99 msaitoh /************************************************************************
3702 1.99 msaitoh * ixgbe_detach - Device removal routine
3703 1.1 dyoung *
3704 1.99 msaitoh * Called when the driver is being removed.
3705 1.99 msaitoh * Stops the adapter and deallocates all the resources
3706 1.99 msaitoh * that were allocated for driver operation.
3707 1.1 dyoung *
3708 1.99 msaitoh * return 0 on success, positive on failure
3709 1.99 msaitoh ************************************************************************/
3710 1.98 msaitoh static int
3711 1.98 msaitoh ixgbe_detach(device_t dev, int flags)
3712 1.1 dyoung {
3713 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3714 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
3715 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3716 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3717 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
3718 1.98 msaitoh u32 ctrl_ext;
3719 1.168 msaitoh int i;
3720 1.28 msaitoh
3721 1.98 msaitoh INIT_DEBUGOUT("ixgbe_detach: begin");
3722 1.333 msaitoh if (sc->osdep.attached == false)
3723 1.98 msaitoh return 0;
3724 1.26 msaitoh
3725 1.99 msaitoh if (ixgbe_pci_iov_detach(dev) != 0) {
3726 1.99 msaitoh device_printf(dev, "SR-IOV in use; detach first.\n");
3727 1.99 msaitoh return (EBUSY);
3728 1.99 msaitoh }
3729 1.99 msaitoh
3730 1.333 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) &&
3731 1.293 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
3732 1.99 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n");
3733 1.99 msaitoh return (EBUSY);
3734 1.26 msaitoh }
3735 1.293 yamaguch
3736 1.333 msaitoh ether_ifdetach(sc->ifp);
3737 1.24 msaitoh
3738 1.333 msaitoh sc->osdep.detaching = true;
3739 1.241 msaitoh /*
3740 1.252 msaitoh * Stop the interface. ixgbe_setup_low_power_mode() calls
3741 1.253 msaitoh * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3742 1.252 msaitoh * directly.
3743 1.241 msaitoh */
3744 1.333 msaitoh ixgbe_setup_low_power_mode(sc);
3745 1.241 msaitoh
3746 1.333 msaitoh callout_halt(&sc->timer, NULL);
3747 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3748 1.333 msaitoh callout_halt(&sc->recovery_mode_timer, NULL);
3749 1.333 msaitoh
3750 1.333 msaitoh workqueue_wait(sc->admin_wq, &sc->admin_wc);
3751 1.333 msaitoh atomic_store_relaxed(&sc->admin_pending, 0);
3752 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
3753 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
3754 1.241 msaitoh
3755 1.98 msaitoh pmf_device_deregister(dev);
3756 1.26 msaitoh
3757 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
3758 1.185 msaitoh
3759 1.98 msaitoh /* let hardware know driver is unloading */
3760 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3761 1.98 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3762 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3763 1.24 msaitoh
3764 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
3765 1.333 msaitoh netmap_detach(sc->ifp);
3766 1.99 msaitoh
3767 1.333 msaitoh ixgbe_free_pci_resources(sc);
3768 1.98 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */
3769 1.98 msaitoh bus_generic_detach(dev);
3770 1.98 msaitoh #endif
3771 1.333 msaitoh if_detach(sc->ifp);
3772 1.333 msaitoh ifmedia_fini(&sc->media);
3773 1.333 msaitoh if_percpuq_destroy(sc->ipq);
3774 1.333 msaitoh
3775 1.333 msaitoh sysctl_teardown(&sc->sysctllog);
3776 1.333 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup);
3777 1.333 msaitoh evcnt_detach(&sc->mbuf_defrag_failed);
3778 1.333 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup);
3779 1.333 msaitoh evcnt_detach(&sc->einval_tx_dma_setup);
3780 1.333 msaitoh evcnt_detach(&sc->other_tx_dma_setup);
3781 1.333 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup);
3782 1.333 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup);
3783 1.333 msaitoh evcnt_detach(&sc->watchdog_events);
3784 1.333 msaitoh evcnt_detach(&sc->tso_err);
3785 1.333 msaitoh evcnt_detach(&sc->admin_irqev);
3786 1.333 msaitoh evcnt_detach(&sc->link_workev);
3787 1.333 msaitoh evcnt_detach(&sc->mod_workev);
3788 1.333 msaitoh evcnt_detach(&sc->msf_workev);
3789 1.333 msaitoh evcnt_detach(&sc->phy_workev);
3790 1.1 dyoung
3791 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3792 1.98 msaitoh if (i < __arraycount(stats->mpc)) {
3793 1.98 msaitoh evcnt_detach(&stats->mpc[i]);
3794 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
3795 1.98 msaitoh evcnt_detach(&stats->rnbc[i]);
3796 1.98 msaitoh }
3797 1.98 msaitoh if (i < __arraycount(stats->pxontxc)) {
3798 1.98 msaitoh evcnt_detach(&stats->pxontxc[i]);
3799 1.98 msaitoh evcnt_detach(&stats->pxonrxc[i]);
3800 1.98 msaitoh evcnt_detach(&stats->pxofftxc[i]);
3801 1.98 msaitoh evcnt_detach(&stats->pxoffrxc[i]);
3802 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3803 1.151 msaitoh evcnt_detach(&stats->pxon2offc[i]);
3804 1.98 msaitoh }
3805 1.168 msaitoh }
3806 1.168 msaitoh
3807 1.333 msaitoh txr = sc->tx_rings;
3808 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
3809 1.333 msaitoh evcnt_detach(&sc->queues[i].irqs);
3810 1.333 msaitoh evcnt_detach(&sc->queues[i].handleq);
3811 1.333 msaitoh evcnt_detach(&sc->queues[i].req);
3812 1.168 msaitoh evcnt_detach(&txr->total_packets);
3813 1.168 msaitoh #ifndef IXGBE_LEGACY_TX
3814 1.168 msaitoh evcnt_detach(&txr->pcq_drops);
3815 1.168 msaitoh #endif
3816 1.327 msaitoh evcnt_detach(&txr->no_desc_avail);
3817 1.327 msaitoh evcnt_detach(&txr->tso_tx);
3818 1.168 msaitoh
3819 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
3820 1.98 msaitoh evcnt_detach(&stats->qprc[i]);
3821 1.98 msaitoh evcnt_detach(&stats->qptc[i]);
3822 1.98 msaitoh evcnt_detach(&stats->qbrc[i]);
3823 1.98 msaitoh evcnt_detach(&stats->qbtc[i]);
3824 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3825 1.151 msaitoh evcnt_detach(&stats->qprdc[i]);
3826 1.34 msaitoh }
3827 1.98 msaitoh
3828 1.98 msaitoh evcnt_detach(&rxr->rx_packets);
3829 1.98 msaitoh evcnt_detach(&rxr->rx_bytes);
3830 1.98 msaitoh evcnt_detach(&rxr->rx_copies);
3831 1.290 msaitoh evcnt_detach(&rxr->no_mbuf);
3832 1.98 msaitoh evcnt_detach(&rxr->rx_discarded);
3833 1.1 dyoung }
3834 1.98 msaitoh evcnt_detach(&stats->ipcs);
3835 1.98 msaitoh evcnt_detach(&stats->l4cs);
3836 1.98 msaitoh evcnt_detach(&stats->ipcs_bad);
3837 1.98 msaitoh evcnt_detach(&stats->l4cs_bad);
3838 1.98 msaitoh evcnt_detach(&stats->intzero);
3839 1.98 msaitoh evcnt_detach(&stats->legint);
3840 1.98 msaitoh evcnt_detach(&stats->crcerrs);
3841 1.98 msaitoh evcnt_detach(&stats->illerrc);
3842 1.98 msaitoh evcnt_detach(&stats->errbc);
3843 1.98 msaitoh evcnt_detach(&stats->mspdc);
3844 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3845 1.98 msaitoh evcnt_detach(&stats->mbsdc);
3846 1.98 msaitoh evcnt_detach(&stats->mpctotal);
3847 1.98 msaitoh evcnt_detach(&stats->mlfc);
3848 1.98 msaitoh evcnt_detach(&stats->mrfc);
3849 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
3850 1.326 msaitoh evcnt_detach(&stats->link_dn_cnt);
3851 1.98 msaitoh evcnt_detach(&stats->rlec);
3852 1.98 msaitoh evcnt_detach(&stats->lxontxc);
3853 1.98 msaitoh evcnt_detach(&stats->lxonrxc);
3854 1.98 msaitoh evcnt_detach(&stats->lxofftxc);
3855 1.98 msaitoh evcnt_detach(&stats->lxoffrxc);
3856 1.98 msaitoh
3857 1.98 msaitoh /* Packet Reception Stats */
3858 1.98 msaitoh evcnt_detach(&stats->tor);
3859 1.98 msaitoh evcnt_detach(&stats->gorc);
3860 1.98 msaitoh evcnt_detach(&stats->tpr);
3861 1.98 msaitoh evcnt_detach(&stats->gprc);
3862 1.98 msaitoh evcnt_detach(&stats->mprc);
3863 1.98 msaitoh evcnt_detach(&stats->bprc);
3864 1.98 msaitoh evcnt_detach(&stats->prc64);
3865 1.98 msaitoh evcnt_detach(&stats->prc127);
3866 1.98 msaitoh evcnt_detach(&stats->prc255);
3867 1.98 msaitoh evcnt_detach(&stats->prc511);
3868 1.98 msaitoh evcnt_detach(&stats->prc1023);
3869 1.98 msaitoh evcnt_detach(&stats->prc1522);
3870 1.98 msaitoh evcnt_detach(&stats->ruc);
3871 1.98 msaitoh evcnt_detach(&stats->rfc);
3872 1.98 msaitoh evcnt_detach(&stats->roc);
3873 1.98 msaitoh evcnt_detach(&stats->rjc);
3874 1.98 msaitoh evcnt_detach(&stats->mngprc);
3875 1.98 msaitoh evcnt_detach(&stats->mngpdc);
3876 1.98 msaitoh evcnt_detach(&stats->xec);
3877 1.1 dyoung
3878 1.98 msaitoh /* Packet Transmission Stats */
3879 1.98 msaitoh evcnt_detach(&stats->gotc);
3880 1.98 msaitoh evcnt_detach(&stats->tpt);
3881 1.98 msaitoh evcnt_detach(&stats->gptc);
3882 1.98 msaitoh evcnt_detach(&stats->bptc);
3883 1.98 msaitoh evcnt_detach(&stats->mptc);
3884 1.98 msaitoh evcnt_detach(&stats->mngptc);
3885 1.98 msaitoh evcnt_detach(&stats->ptc64);
3886 1.98 msaitoh evcnt_detach(&stats->ptc127);
3887 1.98 msaitoh evcnt_detach(&stats->ptc255);
3888 1.98 msaitoh evcnt_detach(&stats->ptc511);
3889 1.98 msaitoh evcnt_detach(&stats->ptc1023);
3890 1.98 msaitoh evcnt_detach(&stats->ptc1522);
3891 1.1 dyoung
3892 1.333 msaitoh ixgbe_free_queues(sc);
3893 1.333 msaitoh free(sc->mta, M_DEVBUF);
3894 1.1 dyoung
3895 1.333 msaitoh mutex_destroy(&sc->admin_mtx); /* XXX appropriate order? */
3896 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
3897 1.1 dyoung
3898 1.1 dyoung return (0);
3899 1.99 msaitoh } /* ixgbe_detach */
3900 1.1 dyoung
3901 1.99 msaitoh /************************************************************************
3902 1.99 msaitoh * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3903 1.99 msaitoh *
3904 1.99 msaitoh * Prepare the adapter/port for LPLU and/or WoL
3905 1.99 msaitoh ************************************************************************/
3906 1.1 dyoung static int
3907 1.333 msaitoh ixgbe_setup_low_power_mode(struct ixgbe_softc *sc)
3908 1.1 dyoung {
3909 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3910 1.333 msaitoh device_t dev = sc->dev;
3911 1.333 msaitoh struct ifnet *ifp = sc->ifp;
3912 1.186 msaitoh s32 error = 0;
3913 1.98 msaitoh
3914 1.98 msaitoh /* Limit power management flow to X550EM baseT */
3915 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3916 1.99 msaitoh hw->phy.ops.enter_lplu) {
3917 1.98 msaitoh /* X550EM baseT adapters need a special LPLU flow */
3918 1.98 msaitoh hw->phy.reset_disable = true;
3919 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3920 1.98 msaitoh error = hw->phy.ops.enter_lplu(hw);
3921 1.98 msaitoh if (error)
3922 1.98 msaitoh device_printf(dev,
3923 1.98 msaitoh "Error entering LPLU: %d\n", error);
3924 1.98 msaitoh hw->phy.reset_disable = false;
3925 1.98 msaitoh } else {
3926 1.98 msaitoh /* Just stop for other adapters */
3927 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3928 1.33 msaitoh }
3929 1.1 dyoung
3930 1.333 msaitoh IXGBE_CORE_LOCK(sc);
3931 1.253 msaitoh
3932 1.98 msaitoh if (!hw->wol_enabled) {
3933 1.98 msaitoh ixgbe_set_phy_power(hw, FALSE);
3934 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3935 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3936 1.98 msaitoh } else {
3937 1.98 msaitoh /* Turn off support for APM wakeup. (Using ACPI instead) */
3938 1.166 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3939 1.166 msaitoh IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3940 1.34 msaitoh
3941 1.35 msaitoh /*
3942 1.98 msaitoh * Clear Wake Up Status register to prevent any previous wakeup
3943 1.98 msaitoh * events from waking us up immediately after we suspend.
3944 1.33 msaitoh */
3945 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3946 1.98 msaitoh
3947 1.1 dyoung /*
3948 1.98 msaitoh * Program the Wakeup Filter Control register with user filter
3949 1.98 msaitoh * settings
3950 1.33 msaitoh */
3951 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3952 1.98 msaitoh
3953 1.98 msaitoh /* Enable wakeups and power management in Wakeup Control */
3954 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC,
3955 1.98 msaitoh IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3956 1.1 dyoung }
3957 1.1 dyoung
3958 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
3959 1.253 msaitoh
3960 1.98 msaitoh return error;
3961 1.99 msaitoh } /* ixgbe_setup_low_power_mode */
3962 1.98 msaitoh
3963 1.99 msaitoh /************************************************************************
3964 1.99 msaitoh * ixgbe_shutdown - Shutdown entry point
3965 1.99 msaitoh ************************************************************************/
3966 1.98 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3967 1.98 msaitoh static int
3968 1.98 msaitoh ixgbe_shutdown(device_t dev)
3969 1.98 msaitoh {
3970 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3971 1.98 msaitoh int error = 0;
3972 1.34 msaitoh
3973 1.98 msaitoh INIT_DEBUGOUT("ixgbe_shutdown: begin");
3974 1.34 msaitoh
3975 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3976 1.1 dyoung
3977 1.98 msaitoh return (error);
3978 1.99 msaitoh } /* ixgbe_shutdown */
3979 1.98 msaitoh #endif
3980 1.1 dyoung
3981 1.99 msaitoh /************************************************************************
3982 1.99 msaitoh * ixgbe_suspend
3983 1.99 msaitoh *
3984 1.99 msaitoh * From D0 to D3
3985 1.99 msaitoh ************************************************************************/
3986 1.98 msaitoh static bool
3987 1.98 msaitoh ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3988 1.1 dyoung {
3989 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3990 1.186 msaitoh int error = 0;
3991 1.98 msaitoh
3992 1.98 msaitoh INIT_DEBUGOUT("ixgbe_suspend: begin");
3993 1.98 msaitoh
3994 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3995 1.1 dyoung
3996 1.98 msaitoh return (error);
3997 1.99 msaitoh } /* ixgbe_suspend */
3998 1.1 dyoung
3999 1.99 msaitoh /************************************************************************
4000 1.99 msaitoh * ixgbe_resume
4001 1.99 msaitoh *
4002 1.99 msaitoh * From D3 to D0
4003 1.99 msaitoh ************************************************************************/
4004 1.98 msaitoh static bool
4005 1.98 msaitoh ixgbe_resume(device_t dev, const pmf_qual_t *qual)
4006 1.98 msaitoh {
4007 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
4008 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4009 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4010 1.186 msaitoh u32 wus;
4011 1.1 dyoung
4012 1.98 msaitoh INIT_DEBUGOUT("ixgbe_resume: begin");
4013 1.33 msaitoh
4014 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4015 1.43 msaitoh
4016 1.98 msaitoh /* Read & clear WUS register */
4017 1.98 msaitoh wus = IXGBE_READ_REG(hw, IXGBE_WUS);
4018 1.98 msaitoh if (wus)
4019 1.98 msaitoh device_printf(dev, "Woken up by (WUS): %#010x\n",
4020 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_WUS));
4021 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4022 1.98 msaitoh /* And clear WUFC until next low-power transition */
4023 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4024 1.1 dyoung
4025 1.1 dyoung /*
4026 1.98 msaitoh * Required after D3->D0 transition;
4027 1.98 msaitoh * will re-advertise all previous advertised speeds
4028 1.98 msaitoh */
4029 1.98 msaitoh if (ifp->if_flags & IFF_UP)
4030 1.333 msaitoh ixgbe_init_locked(sc);
4031 1.34 msaitoh
4032 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4033 1.1 dyoung
4034 1.98 msaitoh return true;
4035 1.99 msaitoh } /* ixgbe_resume */
4036 1.1 dyoung
4037 1.98 msaitoh /*
4038 1.98 msaitoh * Set the various hardware offload abilities.
4039 1.98 msaitoh *
4040 1.98 msaitoh * This takes the ifnet's if_capenable flags (e.g. set by the user using
4041 1.98 msaitoh * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
4042 1.98 msaitoh * mbuf offload flags the driver will understand.
4043 1.98 msaitoh */
4044 1.1 dyoung static void
4045 1.333 msaitoh ixgbe_set_if_hwassist(struct ixgbe_softc *sc)
4046 1.1 dyoung {
4047 1.98 msaitoh /* XXX */
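#if 0
	/*
	 * Rough sketch only: the FreeBSD-style mapping the comment above
	 * describes would look roughly like this. NetBSD's struct ifnet has
	 * no if_hwassist field and uses different CSUM_* flag names, so
	 * nothing is done here.
	 */
	struct ifnet *ifp = sc->ifp;

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#endif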
4048 1.1 dyoung }
4049 1.1 dyoung
4050 1.99 msaitoh /************************************************************************
4051 1.99 msaitoh * ixgbe_init_locked - Init entry point
4052 1.99 msaitoh *
4053 1.99 msaitoh * Used in two ways: It is used by the stack as an init
4054 1.99 msaitoh * entry point in the network interface structure. It is also
4055 1.99 msaitoh * used by the driver as a hw/sw initialization routine to
4056 1.99 msaitoh * get to a consistent state.
4057 1.1 dyoung *
4058 1.99 msaitoh * return 0 on success, positive on failure
4059 1.99 msaitoh ************************************************************************/
4060 1.98 msaitoh static void
4061 1.333 msaitoh ixgbe_init_locked(struct ixgbe_softc *sc)
4062 1.1 dyoung {
4063 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4064 1.333 msaitoh device_t dev = sc->dev;
4065 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4066 1.157 msaitoh struct ix_queue *que;
4067 1.186 msaitoh struct tx_ring *txr;
4068 1.186 msaitoh struct rx_ring *rxr;
4069 1.98 msaitoh u32 txdctl, mhadd;
4070 1.98 msaitoh u32 rxdctl, rxctrl;
4071 1.186 msaitoh u32 ctrl_ext;
4072 1.219 msaitoh bool unsupported_sfp = false;
4073 1.283 msaitoh int i, j, error;
4074 1.1 dyoung
4075 1.98 msaitoh /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
4076 1.1 dyoung
4077 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4078 1.98 msaitoh INIT_DEBUGOUT("ixgbe_init_locked: begin");
4079 1.1 dyoung
4080 1.219 msaitoh hw->need_unsupported_sfp_recovery = false;
4081 1.98 msaitoh hw->adapter_stopped = FALSE;
4082 1.98 msaitoh ixgbe_stop_adapter(hw);
4083 1.333 msaitoh callout_stop(&sc->timer);
4084 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4085 1.333 msaitoh callout_stop(&sc->recovery_mode_timer);
4086 1.333 msaitoh for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
4087 1.157 msaitoh que->disabled_count = 0;
4088 1.1 dyoung
4089 1.98 msaitoh /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
4090 1.333 msaitoh sc->max_frame_size =
4091 1.98 msaitoh ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4092 1.1 dyoung
4093 1.98 msaitoh /* Queue indices may change with IOV mode */
4094 1.333 msaitoh ixgbe_align_all_queue_indices(sc);
4095 1.99 msaitoh
4096 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
4097 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
4098 1.1 dyoung
4099 1.98 msaitoh /* Get the latest mac address, User can use a LAA */
4100 1.98 msaitoh memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4101 1.98 msaitoh IXGBE_ETH_LENGTH_OF_ADDRESS);
4102 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
4103 1.98 msaitoh hw->addr_ctrl.rar_used_count = 1;
4104 1.1 dyoung
4105 1.98 msaitoh /* Set hardware offload abilities from ifnet flags */
4106 1.333 msaitoh ixgbe_set_if_hwassist(sc);
4107 1.48 msaitoh
4108 1.98 msaitoh /* Prepare transmit descriptors and buffers */
4109 1.333 msaitoh if (ixgbe_setup_transmit_structures(sc)) {
4110 1.98 msaitoh device_printf(dev, "Could not setup transmit structures\n");
4111 1.333 msaitoh ixgbe_stop_locked(sc);
4112 1.98 msaitoh return;
4113 1.98 msaitoh }
4114 1.1 dyoung
4115 1.98 msaitoh ixgbe_init_hw(hw);
4116 1.144 msaitoh
4117 1.333 msaitoh ixgbe_initialize_iov(sc);
4118 1.144 msaitoh
4119 1.333 msaitoh ixgbe_initialize_transmit_units(sc);
4120 1.1 dyoung
4121 1.98 msaitoh /* Setup Multicast table */
4122 1.333 msaitoh ixgbe_set_rxfilter(sc);
4123 1.43 msaitoh
4124 1.289 msaitoh /* Use fixed buffer size, even for jumbo frames */
4125 1.333 msaitoh sc->rx_mbuf_sz = MCLBYTES;
4126 1.43 msaitoh
4127 1.98 msaitoh /* Prepare receive descriptors and buffers */
4128 1.333 msaitoh error = ixgbe_setup_receive_structures(sc);
4129 1.283 msaitoh if (error) {
4130 1.283 msaitoh device_printf(dev,
4131 1.283 msaitoh "Could not setup receive structures (err = %d)\n", error);
4132 1.333 msaitoh ixgbe_stop_locked(sc);
4133 1.98 msaitoh return;
4134 1.98 msaitoh }
4135 1.43 msaitoh
4136 1.98 msaitoh /* Configure RX settings */
4137 1.333 msaitoh ixgbe_initialize_receive_units(sc);
4138 1.43 msaitoh
4139 1.233 msaitoh /* Initialize the variable holding task enqueue requests from interrupts */
4140 1.333 msaitoh sc->task_requests = 0;
4141 1.233 msaitoh
4142 1.99 msaitoh /* Enable SDP & MSI-X interrupts based on adapter */
4143 1.333 msaitoh ixgbe_config_gpie(sc);
4144 1.43 msaitoh
4145 1.98 msaitoh /* Set MTU size */
4146 1.98 msaitoh if (ifp->if_mtu > ETHERMTU) {
4147 1.98 msaitoh /* aka IXGBE_MAXFRS on 82599 and newer */
4148 1.98 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4149 1.98 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
4150 1.333 msaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4151 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4152 1.55 msaitoh }
4153 1.55 msaitoh
4154 1.98 msaitoh /* Now enable all the queues */
4155 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4156 1.333 msaitoh txr = &sc->tx_rings[i];
4157 1.98 msaitoh txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4158 1.98 msaitoh txdctl |= IXGBE_TXDCTL_ENABLE;
4159 1.98 msaitoh /* Set WTHRESH to 8, burst writeback */
4160 1.292 msaitoh txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
4161 1.98 msaitoh /*
4162 1.98 msaitoh * When the internal queue falls below PTHRESH (32),
4163 1.98 msaitoh * start prefetching as long as there are at least
4164 1.98 msaitoh * HTHRESH (1) buffers ready. The values are taken
4165 1.98 msaitoh * from the Intel linux driver 3.8.21.
4166 1.98 msaitoh * Prefetching enables tx line rate even with 1 queue.
4167 1.98 msaitoh */
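		/* PTHRESH occupies TXDCTL bits [6:0], HTHRESH bits [14:8]. */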
4168 1.98 msaitoh txdctl |= (32 << 0) | (1 << 8);
4169 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4170 1.55 msaitoh }
4171 1.43 msaitoh
4172 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4173 1.333 msaitoh rxr = &sc->rx_rings[i];
4174 1.98 msaitoh rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4175 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4176 1.98 msaitoh /*
4177 1.99 msaitoh * PTHRESH = 21
4178 1.99 msaitoh * HTHRESH = 4
4179 1.99 msaitoh * WTHRESH = 8
4180 1.99 msaitoh */
4181 1.98 msaitoh rxdctl &= ~0x3FFFFF;
4182 1.98 msaitoh rxdctl |= 0x080420;
4183 1.98 msaitoh }
4184 1.98 msaitoh rxdctl |= IXGBE_RXDCTL_ENABLE;
4185 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
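		/* Poll up to ~10ms for the hardware to latch RXDCTL.ENABLE. */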
4186 1.144 msaitoh for (j = 0; j < 10; j++) {
4187 1.98 msaitoh if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4188 1.98 msaitoh IXGBE_RXDCTL_ENABLE)
4189 1.98 msaitoh break;
4190 1.98 msaitoh else
4191 1.98 msaitoh msec_delay(1);
4192 1.55 msaitoh }
4193 1.217 msaitoh IXGBE_WRITE_BARRIER(hw);
4194 1.99 msaitoh
4195 1.98 msaitoh /*
4196 1.98 msaitoh * In netmap mode, we must preserve the buffers made
4197 1.98 msaitoh * available to userspace before the if_init()
4198 1.98 msaitoh * (this is true by default on the TX side, because
4199 1.98 msaitoh * init makes all buffers available to userspace).
4200 1.98 msaitoh *
4201 1.98 msaitoh * netmap_reset() and the device specific routines
4202 1.98 msaitoh * (e.g. ixgbe_setup_receive_rings()) map these
4203 1.98 msaitoh * buffers at the end of the NIC ring, so here we
4204 1.98 msaitoh * must set the RDT (tail) register to make sure
4205 1.98 msaitoh * they are not overwritten.
4206 1.98 msaitoh *
4207 1.98 msaitoh * In this driver the NIC ring starts at RDH = 0,
4208 1.98 msaitoh * RDT points to the last slot available for reception (?),
4209 1.98 msaitoh * so RDT = num_rx_desc - 1 means the whole ring is available.
4210 1.98 msaitoh */
4211 1.99 msaitoh #ifdef DEV_NETMAP
4212 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
4213 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP)) {
4214 1.333 msaitoh struct netmap_adapter *na = NA(sc->ifp);
4215 1.189 msaitoh struct netmap_kring *kring = na->rx_rings[i];
4216 1.98 msaitoh int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4217 1.98 msaitoh
4218 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4219 1.98 msaitoh } else
4220 1.98 msaitoh #endif /* DEV_NETMAP */
4221 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4222 1.333 msaitoh sc->num_rx_desc - 1);
4223 1.48 msaitoh }
4224 1.98 msaitoh
4225 1.98 msaitoh /* Enable Receive engine */
4226 1.98 msaitoh rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4227 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4228 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_DMBYPS;
4229 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_RXEN;
4230 1.98 msaitoh ixgbe_enable_rx_dma(hw, rxctrl);
4231 1.98 msaitoh
4232 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4233 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4234 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4235 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4236 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4237 1.98 msaitoh
4238 1.144 msaitoh /* Set up MSI/MSI-X routing */
4239 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4240 1.333 msaitoh ixgbe_configure_ivars(sc);
4241 1.98 msaitoh /* Set up auto-mask */
4242 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4243 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4244 1.98 msaitoh else {
4245 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4246 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4247 1.55 msaitoh }
4248 1.98 msaitoh } else { /* Simple settings for Legacy/MSI */
4249 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 0);
4250 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 1);
4251 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4252 1.55 msaitoh }
4253 1.43 msaitoh
4254 1.333 msaitoh ixgbe_init_fdir(sc);
4255 1.98 msaitoh
4256 1.98 msaitoh /*
4257 1.98 msaitoh * Check on any SFP devices that
4258 1.98 msaitoh * need to be kick-started
4259 1.98 msaitoh */
4260 1.98 msaitoh if (hw->phy.type == ixgbe_phy_none) {
4261 1.283 msaitoh error = hw->phy.ops.identify(hw);
4262 1.283 msaitoh if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4263 1.219 msaitoh unsupported_sfp = true;
4264 1.219 msaitoh } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4265 1.219 msaitoh unsupported_sfp = true;
4266 1.219 msaitoh
4267 1.219 msaitoh if (unsupported_sfp)
4268 1.219 msaitoh device_printf(dev,
4269 1.219 msaitoh "Unsupported SFP+ module type was detected.\n");
4270 1.98 msaitoh
4271 1.98 msaitoh /* Set moderation on the Link interrupt */
4272 1.333 msaitoh ixgbe_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);
4273 1.98 msaitoh
4274 1.173 msaitoh /* Enable EEE power saving */
4275 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
4276 1.173 msaitoh hw->mac.ops.setup_eee(hw,
4277 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
4278 1.173 msaitoh
4279 1.144 msaitoh /* Enable power to the phy. */
4280 1.219 msaitoh if (!unsupported_sfp) {
4281 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
4282 1.144 msaitoh
4283 1.219 msaitoh /* Config/Enable Link */
4284 1.333 msaitoh ixgbe_config_link(sc);
4285 1.219 msaitoh }
4286 1.55 msaitoh
4287 1.98 msaitoh /* Hardware Packet Buffer & Flow Control setup */
4288 1.333 msaitoh ixgbe_config_delay_values(sc);
4289 1.1 dyoung
4290 1.98 msaitoh /* Initialize the FC settings */
4291 1.98 msaitoh ixgbe_start_hw(hw);
4292 1.1 dyoung
4293 1.98 msaitoh /* Set up VLAN support and filter */
4294 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
4295 1.1 dyoung
4296 1.98 msaitoh /* Setup DMA Coalescing */
4297 1.333 msaitoh ixgbe_config_dmac(sc);
4298 1.98 msaitoh
4299 1.230 msaitoh /* OK to schedule workqueues. */
4300 1.333 msaitoh sc->schedule_wqs_ok = true;
4301 1.230 msaitoh
4302 1.98 msaitoh /* Enable the use of the MBX by the VF's */
4303 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
4304 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4305 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4306 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4307 1.1 dyoung }
4308 1.98 msaitoh
4309 1.123 msaitoh /* Update saved flags. See ixgbe_ifflags_cb() */
4310 1.333 msaitoh sc->if_flags = ifp->if_flags;
4311 1.333 msaitoh sc->ec_capenable = sc->osdep.ec.ec_capenable;
4312 1.123 msaitoh
4313 1.337 msaitoh /* Inform the stack we're ready */
4314 1.98 msaitoh ifp->if_flags |= IFF_RUNNING;
4315 1.98 msaitoh
4316 1.337 msaitoh /* And now turn on interrupts */
4317 1.337 msaitoh ixgbe_enable_intr(sc);
4318 1.337 msaitoh
4319 1.1 dyoung return;
4320 1.99 msaitoh } /* ixgbe_init_locked */
4321 1.1 dyoung
4322 1.99 msaitoh /************************************************************************
4323 1.99 msaitoh * ixgbe_init
4324 1.99 msaitoh ************************************************************************/
4325 1.98 msaitoh static int
4326 1.98 msaitoh ixgbe_init(struct ifnet *ifp)
4327 1.98 msaitoh {
4328 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
4329 1.98 msaitoh
4330 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4331 1.333 msaitoh ixgbe_init_locked(sc);
4332 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4333 1.98 msaitoh
4334 1.98 msaitoh return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4335 1.99 msaitoh } /* ixgbe_init */
4336 1.43 msaitoh
4337 1.99 msaitoh /************************************************************************
4338 1.99 msaitoh * ixgbe_set_ivar
4339 1.99 msaitoh *
4340 1.99 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt
4341 1.99 msaitoh * (yes this is all very magic and confusing :)
4342 1.99 msaitoh * - entry is the register array entry
4343 1.99 msaitoh * - vector is the MSI-X vector for this queue
4344 1.99 msaitoh * - type is RX/TX/MISC
4345 1.99 msaitoh ************************************************************************/
4346 1.42 msaitoh static void
4347 1.333 msaitoh ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
4348 1.1 dyoung {
4349 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4350 1.98 msaitoh u32 ivar, index;
4351 1.98 msaitoh
4352 1.98 msaitoh vector |= IXGBE_IVAR_ALLOC_VAL;
4353 1.98 msaitoh
4354 1.98 msaitoh switch (hw->mac.type) {
4355 1.98 msaitoh case ixgbe_mac_82598EB:
4356 1.98 msaitoh if (type == -1)
4357 1.98 msaitoh entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4358 1.98 msaitoh else
4359 1.98 msaitoh entry += (type * 64);
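		/* Four 8-bit allocation entries per 32-bit IVAR register. */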
4360 1.98 msaitoh index = (entry >> 2) & 0x1F;
4361 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4362 1.198 msaitoh ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4363 1.198 msaitoh ivar |= ((u32)vector << (8 * (entry & 0x3)));
4364 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
4365 1.98 msaitoh break;
4366 1.98 msaitoh case ixgbe_mac_82599EB:
4367 1.98 msaitoh case ixgbe_mac_X540:
4368 1.98 msaitoh case ixgbe_mac_X550:
4369 1.98 msaitoh case ixgbe_mac_X550EM_x:
4370 1.99 msaitoh case ixgbe_mac_X550EM_a:
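		/*
		 * For RX/TX causes, each IVAR register covers two queues:
		 * within each 16-bit half, the low byte holds the RX cause
		 * and the high byte the TX cause.
		 */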
4371 1.98 msaitoh if (type == -1) { /* MISC IVAR */
4372 1.98 msaitoh index = (entry & 1) * 8;
4373 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4374 1.194 msaitoh ivar &= ~(0xffUL << index);
4375 1.194 msaitoh ivar |= ((u32)vector << index);
4376 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4377 1.98 msaitoh } else { /* RX/TX IVARS */
4378 1.98 msaitoh index = (16 * (entry & 1)) + (8 * type);
4379 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4380 1.194 msaitoh ivar &= ~(0xffUL << index);
4381 1.194 msaitoh ivar |= ((u32)vector << index);
4382 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4383 1.98 msaitoh }
4384 1.135 msaitoh break;
4385 1.98 msaitoh default:
4386 1.98 msaitoh break;
4387 1.98 msaitoh }
4388 1.99 msaitoh } /* ixgbe_set_ivar */
4389 1.1 dyoung
4390 1.99 msaitoh /************************************************************************
4391 1.99 msaitoh * ixgbe_configure_ivars
4392 1.99 msaitoh ************************************************************************/
4393 1.98 msaitoh static void
4394 1.333 msaitoh ixgbe_configure_ivars(struct ixgbe_softc *sc)
4395 1.98 msaitoh {
4396 1.333 msaitoh struct ix_queue *que = sc->queues;
4397 1.186 msaitoh u32 newitr;
4398 1.1 dyoung
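	/*
	 * The 0x0FF8 mask keeps only bits [11:3], which aligns with the
	 * EITR interval field; e.g. max_interrupt_rate = 31250 irq/s
	 * yields newitr = 4000000 / 31250 = 128 (0x80).
	 */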
4399 1.343 msaitoh if (sc->max_interrupt_rate > 0)
4400 1.343 msaitoh newitr = (4000000 / sc->max_interrupt_rate) & 0x0FF8;
4401 1.98 msaitoh else {
4402 1.48 msaitoh /*
4403 1.99 msaitoh * Disable DMA coalescing if interrupt moderation is
4404 1.99 msaitoh * disabled.
4405 1.99 msaitoh */
4406 1.333 msaitoh sc->dmac = 0;
4407 1.98 msaitoh newitr = 0;
4408 1.98 msaitoh }
4409 1.98 msaitoh
4410 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
4411 1.333 msaitoh struct rx_ring *rxr = &sc->rx_rings[i];
4412 1.333 msaitoh struct tx_ring *txr = &sc->tx_rings[i];
4413 1.98 msaitoh /* First the RX queue entry */
4414 1.333 msaitoh ixgbe_set_ivar(sc, rxr->me, que->msix, 0);
4415 1.98 msaitoh /* ... and the TX */
4416 1.333 msaitoh ixgbe_set_ivar(sc, txr->me, que->msix, 1);
4417 1.98 msaitoh /* Set an Initial EITR value */
4418 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, newitr);
4419 1.138 knakahar /*
4420 1.138 knakahar * To eliminate the influence of the previous state.
4421 1.138 knakahar * At this point, the Tx/Rx interrupt handler
4422 1.138 knakahar * (ixgbe_msix_que()) cannot be called, so neither
4423 1.138 knakahar * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4424 1.138 knakahar */
4425 1.138 knakahar que->eitr_setting = 0;
4426 1.98 msaitoh }
4427 1.98 msaitoh
4428 1.98 msaitoh /* For the Link interrupt */
4429 1.333 msaitoh ixgbe_set_ivar(sc, 1, sc->vector, -1);
4430 1.99 msaitoh } /* ixgbe_configure_ivars */
4431 1.98 msaitoh
4432 1.99 msaitoh /************************************************************************
4433 1.99 msaitoh * ixgbe_config_gpie
4434 1.99 msaitoh ************************************************************************/
4435 1.98 msaitoh static void
4436 1.333 msaitoh ixgbe_config_gpie(struct ixgbe_softc *sc)
4437 1.98 msaitoh {
4438 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4439 1.186 msaitoh u32 gpie;
4440 1.98 msaitoh
4441 1.98 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4442 1.98 msaitoh
4443 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4444 1.99 msaitoh /* Enable Enhanced MSI-X mode */
4445 1.99 msaitoh gpie |= IXGBE_GPIE_MSIX_MODE
4446 1.186 msaitoh | IXGBE_GPIE_EIAME
4447 1.186 msaitoh | IXGBE_GPIE_PBA_SUPPORT
4448 1.186 msaitoh | IXGBE_GPIE_OCD;
4449 1.99 msaitoh }
4450 1.99 msaitoh
4451 1.98 msaitoh /* Fan Failure Interrupt */
4452 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4453 1.98 msaitoh gpie |= IXGBE_SDP1_GPIEN;
4454 1.1 dyoung
4455 1.99 msaitoh /* Thermal Sensor Interrupt */
4456 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4457 1.99 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4458 1.1 dyoung
4459 1.99 msaitoh /* Link detection */
4460 1.99 msaitoh switch (hw->mac.type) {
4461 1.99 msaitoh case ixgbe_mac_82599EB:
4462 1.99 msaitoh gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4463 1.99 msaitoh break;
4464 1.99 msaitoh case ixgbe_mac_X550EM_x:
4465 1.99 msaitoh case ixgbe_mac_X550EM_a:
4466 1.98 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4467 1.99 msaitoh break;
4468 1.99 msaitoh default:
4469 1.99 msaitoh break;
4470 1.1 dyoung }
4471 1.1 dyoung
4472 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4473 1.98 msaitoh
4474 1.99 msaitoh } /* ixgbe_config_gpie */
4475 1.1 dyoung
4476 1.99 msaitoh /************************************************************************
4477 1.99 msaitoh * ixgbe_config_delay_values
4478 1.99 msaitoh *
4479 1.333 msaitoh * Requires sc->max_frame_size to be set.
4480 1.99 msaitoh ************************************************************************/
4481 1.33 msaitoh static void
4482 1.333 msaitoh ixgbe_config_delay_values(struct ixgbe_softc *sc)
4483 1.33 msaitoh {
4484 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4485 1.186 msaitoh u32 rxpb, frame, size, tmp;
4486 1.33 msaitoh
4487 1.333 msaitoh frame = sc->max_frame_size;
4488 1.33 msaitoh
4489 1.98 msaitoh /* Calculate High Water */
4490 1.98 msaitoh switch (hw->mac.type) {
4491 1.98 msaitoh case ixgbe_mac_X540:
4492 1.44 msaitoh case ixgbe_mac_X550:
4493 1.44 msaitoh case ixgbe_mac_X550EM_x:
4494 1.99 msaitoh case ixgbe_mac_X550EM_a:
4495 1.98 msaitoh tmp = IXGBE_DV_X540(frame, frame);
4496 1.44 msaitoh break;
4497 1.44 msaitoh default:
4498 1.98 msaitoh tmp = IXGBE_DV(frame, frame);
4499 1.44 msaitoh break;
4500 1.44 msaitoh }
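	/*
	 * tmp is a delay value in bit times; convert it and the RX packet
	 * buffer size to KB and leave the remainder as the high-water mark.
	 */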
4501 1.98 msaitoh size = IXGBE_BT2KB(tmp);
4502 1.98 msaitoh rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4503 1.98 msaitoh hw->fc.high_water[0] = rxpb - size;
4504 1.44 msaitoh
4505 1.98 msaitoh /* Now calculate Low Water */
4506 1.98 msaitoh switch (hw->mac.type) {
4507 1.98 msaitoh case ixgbe_mac_X540:
4508 1.98 msaitoh case ixgbe_mac_X550:
4509 1.98 msaitoh case ixgbe_mac_X550EM_x:
4510 1.99 msaitoh case ixgbe_mac_X550EM_a:
4511 1.98 msaitoh tmp = IXGBE_LOW_DV_X540(frame);
4512 1.98 msaitoh break;
4513 1.98 msaitoh default:
4514 1.98 msaitoh tmp = IXGBE_LOW_DV(frame);
4515 1.98 msaitoh break;
4516 1.33 msaitoh }
4517 1.98 msaitoh hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4518 1.33 msaitoh
4519 1.98 msaitoh hw->fc.pause_time = IXGBE_FC_PAUSE;
4520 1.98 msaitoh hw->fc.send_xon = TRUE;
4521 1.99 msaitoh } /* ixgbe_config_delay_values */
4522 1.33 msaitoh
4523 1.99 msaitoh /************************************************************************
4524 1.213 msaitoh * ixgbe_set_rxfilter - Multicast Update
4525 1.1 dyoung *
4526 1.99 msaitoh * Called whenever multicast address list is updated.
4527 1.99 msaitoh ************************************************************************/
4528 1.1 dyoung static void
4529 1.333 msaitoh ixgbe_set_rxfilter(struct ixgbe_softc *sc)
4530 1.1 dyoung {
4531 1.99 msaitoh struct ixgbe_mc_addr *mta;
4532 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4533 1.98 msaitoh u8 *update_ptr;
4534 1.98 msaitoh int mcnt = 0;
4535 1.99 msaitoh u32 fctrl;
4536 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
4537 1.98 msaitoh struct ether_multi *enm;
4538 1.98 msaitoh struct ether_multistep step;
4539 1.98 msaitoh
4540 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4541 1.213 msaitoh IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4542 1.98 msaitoh
4543 1.333 msaitoh mta = sc->mta;
4544 1.98 msaitoh bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4545 1.1 dyoung
4546 1.105 msaitoh ETHER_LOCK(ec);
4547 1.183 ozaki ec->ec_flags &= ~ETHER_F_ALLMULTI;
4548 1.98 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
4549 1.98 msaitoh while (enm != NULL) {
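		/*
		 * Fall back to ALLMULTI if the table is full or if a range
		 * of addresses (addrlo != addrhi) is requested; the
		 * hardware multicast filter cannot express a range.
		 */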
4550 1.98 msaitoh if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4551 1.98 msaitoh (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4552 1.98 msaitoh ETHER_ADDR_LEN) != 0)) {
4553 1.183 ozaki ec->ec_flags |= ETHER_F_ALLMULTI;
4554 1.98 msaitoh break;
4555 1.98 msaitoh }
4556 1.98 msaitoh bcopy(enm->enm_addrlo,
4557 1.98 msaitoh mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4558 1.333 msaitoh mta[mcnt].vmdq = sc->pool;
4559 1.98 msaitoh mcnt++;
4560 1.98 msaitoh ETHER_NEXT_MULTI(step, enm);
4561 1.98 msaitoh }
4562 1.1 dyoung
4563 1.333 msaitoh fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4564 1.98 msaitoh if (ifp->if_flags & IFF_PROMISC)
4565 1.98 msaitoh fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4566 1.183 ozaki else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4567 1.98 msaitoh fctrl |= IXGBE_FCTRL_MPE;
4568 1.212 msaitoh fctrl &= ~IXGBE_FCTRL_UPE;
4569 1.212 msaitoh } else
4570 1.212 msaitoh fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4571 1.1 dyoung
4572 1.211 msaitoh /* Update multicast filter entries only when it's not ALLMULTI */
4573 1.211 msaitoh if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4574 1.211 msaitoh ETHER_UNLOCK(ec);
4575 1.98 msaitoh update_ptr = (u8 *)mta;
4576 1.333 msaitoh ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4577 1.99 msaitoh ixgbe_mc_array_itr, TRUE);
4578 1.211 msaitoh } else
4579 1.211 msaitoh ETHER_UNLOCK(ec);
4580 1.332 msaitoh
4581 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4582 1.213 msaitoh } /* ixgbe_set_rxfilter */
4583 1.1 dyoung
4584 1.99 msaitoh /************************************************************************
4585 1.99 msaitoh * ixgbe_mc_array_itr
4586 1.99 msaitoh *
4587 1.99 msaitoh * An iterator function needed by the multicast shared code.
4588 1.99 msaitoh * It feeds the shared code routine the addresses in the
4589 1.213 msaitoh * array of ixgbe_set_rxfilter() one by one.
4590 1.99 msaitoh ************************************************************************/
4591 1.98 msaitoh static u8 *
4592 1.98 msaitoh ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4593 1.98 msaitoh {
4594 1.98 msaitoh struct ixgbe_mc_addr *mta;
4595 1.1 dyoung
4596 1.98 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr;
4597 1.98 msaitoh *vmdq = mta->vmdq;
4598 1.33 msaitoh
4599 1.98 msaitoh *update_ptr = (u8*)(mta + 1);
4600 1.99 msaitoh
4601 1.98 msaitoh return (mta->addr);
4602 1.99 msaitoh } /* ixgbe_mc_array_itr */
4603 1.82 msaitoh
4604 1.99 msaitoh /************************************************************************
4605 1.99 msaitoh * ixgbe_local_timer - Timer routine
4606 1.98 msaitoh *
4607 1.99 msaitoh * Checks for link status, updates statistics,
4608 1.99 msaitoh * and runs the watchdog check.
4609 1.99 msaitoh ************************************************************************/
4610 1.98 msaitoh static void
4611 1.98 msaitoh ixgbe_local_timer(void *arg)
4612 1.98 msaitoh {
4613 1.333 msaitoh struct ixgbe_softc *sc = arg;
4614 1.1 dyoung
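	/*
	 * The callout only hands off to the timer workqueue; the CAS on
	 * timer_pending (0 -> 1) prevents enqueueing the same work twice.
	 */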
4615 1.333 msaitoh if (sc->schedule_wqs_ok) {
4616 1.333 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
4617 1.333 msaitoh workqueue_enqueue(sc->timer_wq,
4618 1.333 msaitoh &sc->timer_wc, NULL);
4619 1.233 msaitoh }
4620 1.98 msaitoh }
4621 1.28 msaitoh
4622 1.98 msaitoh static void
4623 1.233 msaitoh ixgbe_handle_timer(struct work *wk, void *context)
4624 1.98 msaitoh {
4625 1.339 msaitoh struct ixgbe_softc *sc = context;
4626 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4627 1.333 msaitoh device_t dev = sc->dev;
4628 1.333 msaitoh struct ix_queue *que = sc->queues;
4629 1.153 msaitoh u64 queues = 0;
4630 1.134 msaitoh u64 v0, v1, v2, v3, v4, v5, v6, v7;
4631 1.153 msaitoh int hung = 0;
4632 1.134 msaitoh int i;
4633 1.1 dyoung
4634 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4635 1.1 dyoung
4636 1.98 msaitoh /* Check for pluggable optics */
4637 1.237 msaitoh if (ixgbe_is_sfp(hw)) {
4638 1.249 msaitoh bool sched_mod_task = false;
4639 1.237 msaitoh
4640 1.249 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4641 1.249 msaitoh /*
4642 1.249 msaitoh * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4643 1.250 msaitoh * any GPIO(SDP). So just schedule TASK_MOD.
4644 1.249 msaitoh */
4645 1.249 msaitoh sched_mod_task = true;
4646 1.249 msaitoh } else {
4647 1.249 msaitoh bool was_full, is_full;
4648 1.249 msaitoh
4649 1.249 msaitoh was_full =
4650 1.249 msaitoh hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4651 1.251 msaitoh is_full = ixgbe_sfp_cage_full(hw);
4652 1.249 msaitoh
4653 1.249 msaitoh /* Do probe if cage state changed */
4654 1.249 msaitoh if (was_full ^ is_full)
4655 1.249 msaitoh sched_mod_task = true;
4656 1.249 msaitoh }
4657 1.249 msaitoh if (sched_mod_task) {
4658 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4659 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4660 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
4661 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4662 1.239 msaitoh }
4663 1.237 msaitoh }
4664 1.1 dyoung
4665 1.333 msaitoh ixgbe_update_link_status(sc);
4666 1.333 msaitoh ixgbe_update_stats_counters(sc);
4667 1.33 msaitoh
4668 1.134 msaitoh /* Update some event counters */
4669 1.134 msaitoh v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4670 1.333 msaitoh que = sc->queues;
4671 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4672 1.186 msaitoh struct tx_ring *txr = que->txr;
4673 1.134 msaitoh
4674 1.134 msaitoh v0 += txr->q_efbig_tx_dma_setup;
4675 1.134 msaitoh v1 += txr->q_mbuf_defrag_failed;
4676 1.134 msaitoh v2 += txr->q_efbig2_tx_dma_setup;
4677 1.134 msaitoh v3 += txr->q_einval_tx_dma_setup;
4678 1.134 msaitoh v4 += txr->q_other_tx_dma_setup;
4679 1.134 msaitoh v5 += txr->q_eagain_tx_dma_setup;
4680 1.134 msaitoh v6 += txr->q_enomem_tx_dma_setup;
4681 1.134 msaitoh v7 += txr->q_tso_err;
4682 1.134 msaitoh }
4683 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
4684 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
4685 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
4686 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
4687 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
4688 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
4689 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
4690 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, v7);
4691 1.134 msaitoh
4692 1.153 msaitoh /*
4693 1.153 msaitoh * Check the TX queues status
4694 1.186 msaitoh * - mark hung queues so we don't schedule on them
4695 1.186 msaitoh * - watchdog only if all queues show hung
4696 1.153 msaitoh */
4697 1.333 msaitoh que = sc->queues;
4698 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4699 1.153 msaitoh /* Keep track of queues with work for soft irq */
4700 1.153 msaitoh if (que->txr->busy)
4701 1.190 msaitoh queues |= 1ULL << que->me;
4702 1.153 msaitoh /*
4703 1.153 msaitoh * Each time txeof runs without cleaning while there
4704 1.153 msaitoh * are uncleaned descriptors, it increments busy. If
4705 1.153 msaitoh * we reach the MAX we declare it hung.
4706 1.153 msaitoh */
4707 1.153 msaitoh if (que->busy == IXGBE_QUEUE_HUNG) {
4708 1.153 msaitoh ++hung;
4709 1.153 msaitoh /* Mark the queue as inactive */
4710 1.333 msaitoh sc->active_queues &= ~(1ULL << que->me);
4711 1.153 msaitoh continue;
4712 1.153 msaitoh } else {
4713 1.153 msaitoh /* Check if we've come back from hung */
4714 1.333 msaitoh if ((sc->active_queues & (1ULL << que->me)) == 0)
4715 1.333 msaitoh sc->active_queues |= 1ULL << que->me;
4716 1.153 msaitoh }
4717 1.153 msaitoh if (que->busy >= IXGBE_MAX_TX_BUSY) {
4718 1.153 msaitoh device_printf(dev,
4719 1.153 msaitoh "Warning queue %d appears to be hung!\n", i);
4720 1.153 msaitoh que->txr->busy = IXGBE_QUEUE_HUNG;
4721 1.153 msaitoh ++hung;
4722 1.153 msaitoh }
4723 1.150 msaitoh }
4724 1.150 msaitoh
4725 1.232 msaitoh /* Only truly watchdog if all queues show hung */
4726 1.333 msaitoh if (hung == sc->num_queues)
4727 1.153 msaitoh goto watchdog;
4728 1.160 msaitoh #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4729 1.153 msaitoh else if (queues != 0) { /* Force an IRQ on queues with work */
4730 1.333 msaitoh que = sc->queues;
4731 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4732 1.139 knakahar mutex_enter(&que->dc_mtx);
4733 1.153 msaitoh if (que->disabled_count == 0)
4734 1.333 msaitoh ixgbe_rearm_queues(sc,
4735 1.153 msaitoh queues & ((u64)1 << i));
4736 1.139 knakahar mutex_exit(&que->dc_mtx);
4737 1.131 knakahar }
4738 1.98 msaitoh }
4739 1.160 msaitoh #endif
4740 1.150 msaitoh
4741 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4742 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4743 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4744 1.153 msaitoh return;
4745 1.1 dyoung
4746 1.153 msaitoh watchdog:
4747 1.333 msaitoh device_printf(sc->dev, "Watchdog timeout -- resetting\n");
4748 1.333 msaitoh sc->ifp->if_flags &= ~IFF_RUNNING;
4749 1.333 msaitoh IXGBE_EVC_ADD(&sc->watchdog_events, 1);
4750 1.333 msaitoh ixgbe_init_locked(sc);
4751 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4752 1.233 msaitoh } /* ixgbe_handle_timer */
4753 1.43 msaitoh
4754 1.99 msaitoh /************************************************************************
4755 1.169 msaitoh * ixgbe_recovery_mode_timer - Recovery mode timer routine
4756 1.169 msaitoh ************************************************************************/
4757 1.169 msaitoh static void
4758 1.169 msaitoh ixgbe_recovery_mode_timer(void *arg)
4759 1.169 msaitoh {
4760 1.333 msaitoh struct ixgbe_softc *sc = arg;
4761 1.233 msaitoh
4762 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
4763 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode_timer_pending,
4764 1.254 msaitoh 0, 1) == 0) {
4765 1.333 msaitoh workqueue_enqueue(sc->recovery_mode_timer_wq,
4766 1.333 msaitoh &sc->recovery_mode_timer_wc, NULL);
4767 1.254 msaitoh }
4768 1.233 msaitoh }
4769 1.233 msaitoh }
4770 1.233 msaitoh
4771 1.233 msaitoh static void
4772 1.233 msaitoh ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4773 1.233 msaitoh {
4774 1.333 msaitoh struct ixgbe_softc *sc = context;
4775 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4776 1.169 msaitoh
4777 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4778 1.169 msaitoh if (ixgbe_fw_recovery_mode(hw)) {
4779 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode, 0, 1) == 0) {
4780 1.169 msaitoh /* Firmware error detected, entering recovery mode */
4781 1.333 msaitoh device_printf(sc->dev,
4782 1.319 msaitoh "Firmware recovery mode detected. Limiting "
4783 1.319 msaitoh "functionality. Refer to the Intel(R) Ethernet "
4784 1.319 msaitoh "Adapters and Devices User Guide for details on "
4785 1.319 msaitoh "firmware recovery mode.\n");
4786 1.169 msaitoh
4787 1.169 msaitoh if (hw->adapter_stopped == FALSE)
4788 1.333 msaitoh ixgbe_stop_locked(sc);
4789 1.169 msaitoh }
4790 1.169 msaitoh } else
4791 1.333 msaitoh atomic_cas_uint(&sc->recovery_mode, 1, 0);
4792 1.169 msaitoh
4793 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
4794 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4795 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4796 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4797 1.233 msaitoh } /* ixgbe_handle_recovery_mode_timer */
4798 1.169 msaitoh
4799 1.169 msaitoh /************************************************************************
4800 1.99 msaitoh * ixgbe_handle_mod - Tasklet for SFP module interrupts
4801 1.273 msaitoh * bool int_en: true if it's called when the interrupt is enabled.
4802 1.99 msaitoh ************************************************************************/
4803 1.1 dyoung static void
4804 1.273 msaitoh ixgbe_handle_mod(void *context, bool int_en)
4805 1.1 dyoung {
4806 1.339 msaitoh struct ixgbe_softc *sc = context;
4807 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4808 1.333 msaitoh device_t dev = sc->dev;
4809 1.249 msaitoh enum ixgbe_sfp_type last_sfp_type;
4810 1.251 msaitoh u32 err;
4811 1.249 msaitoh bool last_unsupported_sfp_recovery;
4812 1.98 msaitoh
4813 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4814 1.257 msaitoh
4815 1.249 msaitoh last_sfp_type = hw->phy.sfp_type;
4816 1.249 msaitoh last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4817 1.333 msaitoh IXGBE_EVC_ADD(&sc->mod_workev, 1);
4818 1.333 msaitoh if (sc->hw.need_crosstalk_fix) {
4819 1.251 msaitoh if ((hw->mac.type != ixgbe_mac_82598EB) &&
4820 1.251 msaitoh !ixgbe_sfp_cage_full(hw))
4821 1.218 msaitoh goto out;
4822 1.98 msaitoh }
4823 1.98 msaitoh
4824 1.98 msaitoh err = hw->phy.ops.identify_sfp(hw);
4825 1.98 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4826 1.249 msaitoh if (last_unsupported_sfp_recovery == false)
4827 1.249 msaitoh device_printf(dev,
4828 1.249 msaitoh "Unsupported SFP+ module type was detected.\n");
4829 1.218 msaitoh goto out;
4830 1.33 msaitoh }
4831 1.33 msaitoh
4832 1.219 msaitoh if (hw->need_unsupported_sfp_recovery) {
4833 1.219 msaitoh device_printf(dev, "Recovering from unsupported SFP\n");
4834 1.219 msaitoh 		/*
4835 1.219 msaitoh 		 * We could recover the status by calling setup_sfp(),
4836 1.219 msaitoh 		 * setup_link() and some others, but that is complex and
4837 1.219 msaitoh 		 * might not work correctly in some unknown cases. To avoid
4838 1.219 msaitoh 		 * that kind of problem, call ixgbe_init_locked(). It's a
4839 1.219 msaitoh 		 * simple and safe approach.
4840 1.219 msaitoh 		 */
4841 1.333 msaitoh ixgbe_init_locked(sc);
4842 1.249 msaitoh } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4843 1.249 msaitoh (hw->phy.sfp_type != last_sfp_type)) {
4844 1.249 msaitoh 		/* A module is present and its type has changed. */
4845 1.249 msaitoh
4846 1.219 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4847 1.219 msaitoh err = hw->phy.ops.reset(hw);
4848 1.219 msaitoh else {
4849 1.219 msaitoh err = hw->mac.ops.setup_sfp(hw);
4850 1.219 msaitoh hw->phy.sfp_setup_needed = FALSE;
4851 1.219 msaitoh }
4852 1.219 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4853 1.219 msaitoh device_printf(dev,
4854 1.219 msaitoh "Setup failure - unsupported SFP+ module type.\n");
4855 1.219 msaitoh goto out;
4856 1.219 msaitoh }
4857 1.1 dyoung }
4858 1.233 msaitoh
4859 1.218 msaitoh out:
4860 1.233 msaitoh 	/* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
4861 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
4862 1.233 msaitoh
4863 1.233 msaitoh /* Adjust media types shown in ifconfig */
4864 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4865 1.333 msaitoh ifmedia_removeall(&sc->media);
4866 1.333 msaitoh ixgbe_add_media_types(sc);
4867 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
4868 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4869 1.233 msaitoh
4870 1.249 msaitoh 	/*
4871 1.288 andvar 	 * Don't schedule an MSF event if the chip is an 82598; it doesn't
4872 1.249 msaitoh 	 * support MSF. In fact, calling ixgbe_handle_msf on 82598 DA makes
4873 1.250 msaitoh 	 * the link flap because the function calls setup_link().
4874 1.249 msaitoh 	 */
4875 1.260 knakahar if (hw->mac.type != ixgbe_mac_82598EB) {
4876 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4877 1.273 msaitoh if (int_en)
4878 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4879 1.273 msaitoh else
4880 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4881 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4882 1.260 knakahar }
4883 1.249 msaitoh
4884 1.233 msaitoh /*
4885 1.233 msaitoh * Don't call ixgbe_schedule_admin_tasklet() because we are on
4886 1.233 msaitoh * the workqueue now.
4887 1.233 msaitoh */
4888 1.99 msaitoh } /* ixgbe_handle_mod */
4889 1.1 dyoung
4890 1.1 dyoung
4891 1.99 msaitoh /************************************************************************
4892 1.99 msaitoh * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4893 1.99 msaitoh ************************************************************************/
4894 1.33 msaitoh static void
4895 1.233 msaitoh ixgbe_handle_msf(void *context)
4896 1.33 msaitoh {
4897 1.339 msaitoh struct ixgbe_softc *sc = context;
4898 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4899 1.186 msaitoh u32 autoneg;
4900 1.186 msaitoh bool negotiate;
4901 1.33 msaitoh
4902 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4903 1.257 msaitoh
4904 1.333 msaitoh IXGBE_EVC_ADD(&sc->msf_workev, 1);
4905 1.33 msaitoh
4906 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
4907 1.98 msaitoh if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4908 1.98 msaitoh hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4909 1.98 msaitoh if (hw->mac.ops.setup_link)
4910 1.98 msaitoh hw->mac.ops.setup_link(hw, autoneg, TRUE);
4911 1.99 msaitoh } /* ixgbe_handle_msf */
4912 1.33 msaitoh
4913 1.99 msaitoh /************************************************************************
4914 1.99 msaitoh * ixgbe_handle_phy - Tasklet for external PHY interrupts
4915 1.99 msaitoh ************************************************************************/
4916 1.1 dyoung static void
4917 1.98 msaitoh ixgbe_handle_phy(void *context)
4918 1.1 dyoung {
4919 1.339 msaitoh struct ixgbe_softc *sc = context;
4920 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4921 1.98 msaitoh int error;
4922 1.1 dyoung
4923 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4924 1.257 msaitoh
4925 1.333 msaitoh IXGBE_EVC_ADD(&sc->phy_workev, 1);
4926 1.98 msaitoh error = hw->phy.ops.handle_lasi(hw);
4927 1.98 msaitoh if (error == IXGBE_ERR_OVERTEMP)
4928 1.333 msaitoh device_printf(sc->dev,
4929 1.98 msaitoh "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4930 1.98 msaitoh " PHY will downshift to lower power state!\n");
4931 1.98 msaitoh else if (error)
4932 1.333 msaitoh device_printf(sc->dev,
4933 1.99 msaitoh "Error handling LASI interrupt: %d\n", error);
4934 1.99 msaitoh } /* ixgbe_handle_phy */
4935 1.1 dyoung
4936 1.98 msaitoh static void
4937 1.233 msaitoh ixgbe_handle_admin(struct work *wk, void *context)
4938 1.233 msaitoh {
4939 1.339 msaitoh struct ixgbe_softc *sc = context;
4940 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4941 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4942 1.260 knakahar u32 task_requests;
4943 1.273 msaitoh u32 eims_enable = 0;
4944 1.260 knakahar
4945 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4946 1.333 msaitoh sc->admin_pending = 0;
4947 1.333 msaitoh task_requests = sc->task_requests;
4948 1.333 msaitoh sc->task_requests = 0;
4949 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4950 1.233 msaitoh
4951 1.233 msaitoh /*
4952 1.233 msaitoh 	 * Hold the IFNET_LOCK across this entire call. This will
4953 1.333 msaitoh 	 * prevent additional changes to sc->phy_layer and serialize
4954 1.233 msaitoh 	 * calls to this tasklet. We cannot hold the CORE_LOCK while
4955 1.233 msaitoh 	 * calling into the ifmedia functions because they call
4956 1.233 msaitoh 	 * ifmedia_lock(), and that lock is the CORE_LOCK.
4957 1.233 msaitoh */
4958 1.233 msaitoh IFNET_LOCK(ifp);
4959 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4960 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4961 1.333 msaitoh ixgbe_handle_link(sc);
4962 1.273 msaitoh eims_enable |= IXGBE_EIMS_LSC;
4963 1.273 msaitoh }
4964 1.319 msaitoh if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0)
4965 1.333 msaitoh ixgbe_handle_mod(sc, false);
4966 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4967 1.333 msaitoh ixgbe_handle_mod(sc, true);
4968 1.273 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
4969 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4970 1.273 msaitoh else
4971 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4972 1.260 knakahar }
4973 1.273 msaitoh if ((task_requests
4974 1.273 msaitoh & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4975 1.333 msaitoh ixgbe_handle_msf(sc);
4976 1.273 msaitoh if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4977 1.273 msaitoh (hw->mac.type == ixgbe_mac_82599EB))
4978 1.273 msaitoh eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4979 1.260 knakahar }
4980 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4981 1.333 msaitoh ixgbe_handle_phy(sc);
4982 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4983 1.260 knakahar }
4984 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4985 1.333 msaitoh ixgbe_reinit_fdir(sc);
4986 1.273 msaitoh eims_enable |= IXGBE_EIMS_FLOW_DIR;
4987 1.260 knakahar }
4988 1.233 msaitoh #if 0 /* notyet */
4989 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4990 1.333 msaitoh ixgbe_handle_mbx(sc);
4991 1.273 msaitoh eims_enable |= IXGBE_EIMS_MAILBOX;
4992 1.260 knakahar }
4993 1.233 msaitoh #endif
4994 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4995 1.233 msaitoh
4996 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4997 1.233 msaitoh IFNET_UNLOCK(ifp);
4998 1.233 msaitoh } /* ixgbe_handle_admin */
4999 1.233 msaitoh
5000 1.233 msaitoh static void
5001 1.98 msaitoh ixgbe_ifstop(struct ifnet *ifp, int disable)
5002 1.98 msaitoh {
5003 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
5004 1.1 dyoung
5005 1.333 msaitoh IXGBE_CORE_LOCK(sc);
5006 1.333 msaitoh ixgbe_stop_locked(sc);
5007 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
5008 1.223 thorpej
5009 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
5010 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
5011 1.98 msaitoh }
5012 1.1 dyoung
5013 1.99 msaitoh /************************************************************************
5014 1.252 msaitoh * ixgbe_stop_locked - Stop the hardware
5015 1.98 msaitoh *
5016 1.99 msaitoh * Disables all traffic on the adapter by issuing a
5017 1.99 msaitoh * global reset on the MAC and deallocates TX/RX buffers.
5018 1.99 msaitoh ************************************************************************/
5019 1.1 dyoung static void
5020 1.252 msaitoh ixgbe_stop_locked(void *arg)
5021 1.1 dyoung {
5022 1.186 msaitoh struct ifnet *ifp;
5023 1.339 msaitoh struct ixgbe_softc *sc = arg;
5024 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5025 1.99 msaitoh
5026 1.333 msaitoh ifp = sc->ifp;
5027 1.98 msaitoh
5028 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5029 1.98 msaitoh
5030 1.252 msaitoh INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
5031 1.333 msaitoh ixgbe_disable_intr(sc);
5032 1.333 msaitoh callout_stop(&sc->timer);
5033 1.98 msaitoh
5034 1.223 thorpej /* Don't schedule workqueues. */
5035 1.333 msaitoh sc->schedule_wqs_ok = false;
5036 1.223 thorpej
5037 1.98 msaitoh /* Let the stack know...*/
5038 1.98 msaitoh ifp->if_flags &= ~IFF_RUNNING;
5039 1.98 msaitoh
5040 1.98 msaitoh ixgbe_reset_hw(hw);
5041 1.98 msaitoh hw->adapter_stopped = FALSE;
5042 1.98 msaitoh ixgbe_stop_adapter(hw);
5043 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82599EB)
5044 1.98 msaitoh ixgbe_stop_mac_link_on_d3_82599(hw);
5045 1.98 msaitoh /* Turn off the laser - noop with no optics */
5046 1.98 msaitoh ixgbe_disable_tx_laser(hw);
5047 1.1 dyoung
5048 1.98 msaitoh /* Update the stack */
5049 1.333 msaitoh sc->link_up = FALSE;
5050 1.333 msaitoh ixgbe_update_link_status(sc);
5051 1.1 dyoung
5052 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
5053 1.333 msaitoh ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
5054 1.1 dyoung
5055 1.98 msaitoh return;
5056 1.252 msaitoh } /* ixgbe_stop_locked */
5057 1.1 dyoung
5058 1.99 msaitoh /************************************************************************
5059 1.99 msaitoh * ixgbe_update_link_status - Update OS on link state
5060 1.99 msaitoh *
5061 1.99 msaitoh * Note: Only updates the OS on the cached link state.
5062 1.186 msaitoh * The real check of the hardware only happens with
5063 1.186 msaitoh * a link interrupt.
5064 1.99 msaitoh ************************************************************************/
5065 1.98 msaitoh static void
5066 1.333 msaitoh ixgbe_update_link_status(struct ixgbe_softc *sc)
5067 1.1 dyoung {
5068 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5069 1.333 msaitoh device_t dev = sc->dev;
5070 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5071 1.98 msaitoh
5072 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5073 1.136 knakahar
5074 1.333 msaitoh if (sc->link_up) {
5075 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
5076 1.138 knakahar 			/*
5077 1.138 knakahar 			 * Reset the EITR settings to eliminate the influence of
5078 1.138 knakahar 			 * the previous state, in the same way as ixgbe_init_locked().
5079 1.138 knakahar 			 */
5080 1.333 msaitoh struct ix_queue *que = sc->queues;
5081 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5082 1.138 knakahar que->eitr_setting = 0;
5083 1.138 knakahar
5084 1.344 msaitoh if (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
5085 1.98 msaitoh /*
5086 1.98 msaitoh 				 * Discard the counts for both MAC Local Fault
5087 1.98 msaitoh 				 * and Remote Fault because those registers are
5088 1.98 msaitoh 				 * valid only when the link is up and the speed
5089 1.98 msaitoh 				 * is 10Gbps.
5090 1.98 msaitoh */
5091 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MLFC);
5092 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MRFC);
5093 1.98 msaitoh }
5094 1.98 msaitoh
5095 1.98 msaitoh if (bootverbose) {
5096 1.98 msaitoh const char *bpsmsg;
5097 1.1 dyoung
5098 1.333 msaitoh switch (sc->link_speed) {
5099 1.98 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
5100 1.98 msaitoh bpsmsg = "10 Gbps";
5101 1.98 msaitoh break;
5102 1.98 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
5103 1.98 msaitoh bpsmsg = "5 Gbps";
5104 1.98 msaitoh break;
5105 1.98 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
5106 1.98 msaitoh bpsmsg = "2.5 Gbps";
5107 1.98 msaitoh break;
5108 1.98 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
5109 1.98 msaitoh bpsmsg = "1 Gbps";
5110 1.98 msaitoh break;
5111 1.98 msaitoh case IXGBE_LINK_SPEED_100_FULL:
5112 1.98 msaitoh bpsmsg = "100 Mbps";
5113 1.98 msaitoh break;
5114 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
5115 1.99 msaitoh bpsmsg = "10 Mbps";
5116 1.99 msaitoh break;
5117 1.98 msaitoh default:
5118 1.98 msaitoh bpsmsg = "unknown speed";
5119 1.98 msaitoh break;
5120 1.98 msaitoh }
5121 1.98 msaitoh device_printf(dev, "Link is up %s %s \n",
5122 1.98 msaitoh bpsmsg, "Full Duplex");
5123 1.98 msaitoh }
5124 1.333 msaitoh sc->link_active = LINK_STATE_UP;
5125 1.98 msaitoh /* Update any Flow Control changes */
5126 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5127 1.98 msaitoh /* Update DMA coalescing config */
5128 1.333 msaitoh ixgbe_config_dmac(sc);
5129 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_UP);
5130 1.144 msaitoh
5131 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5132 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5133 1.98 msaitoh }
5134 1.174 msaitoh } else {
5135 1.174 msaitoh /*
5136 1.174 msaitoh * Do it when link active changes to DOWN. i.e.
5137 1.174 msaitoh * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5138 1.186 msaitoh * b) LINK_STATE_UP -> LINK_STATE_DOWN
5139 1.174 msaitoh */
5140 1.333 msaitoh if (sc->link_active != LINK_STATE_DOWN) {
5141 1.98 msaitoh if (bootverbose)
5142 1.98 msaitoh device_printf(dev, "Link is Down\n");
5143 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN);
5144 1.333 msaitoh sc->link_active = LINK_STATE_DOWN;
5145 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5146 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5147 1.333 msaitoh ixgbe_drain_all(sc);
5148 1.98 msaitoh }
5149 1.1 dyoung }
5150 1.99 msaitoh } /* ixgbe_update_link_status */
5151 1.1 dyoung
5152 1.99 msaitoh /************************************************************************
5153 1.99 msaitoh * ixgbe_config_dmac - Configure DMA Coalescing
5154 1.99 msaitoh ************************************************************************/
5155 1.1 dyoung static void
5156 1.333 msaitoh ixgbe_config_dmac(struct ixgbe_softc *sc)
5157 1.1 dyoung {
5158 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5159 1.98 msaitoh struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5160 1.1 dyoung
5161 1.99 msaitoh if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5162 1.98 msaitoh return;
5163 1.65 msaitoh
5164 1.333 msaitoh if (dcfg->watchdog_timer ^ sc->dmac ||
5165 1.333 msaitoh dcfg->link_speed ^ sc->link_speed) {
5166 1.333 msaitoh dcfg->watchdog_timer = sc->dmac;
5167 1.98 msaitoh dcfg->fcoe_en = false;
5168 1.333 msaitoh dcfg->link_speed = sc->link_speed;
5169 1.98 msaitoh dcfg->num_tcs = 1;
5170 1.51 msaitoh
5171 1.98 msaitoh INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5172 1.98 msaitoh dcfg->watchdog_timer, dcfg->link_speed);
5173 1.51 msaitoh
5174 1.98 msaitoh hw->mac.ops.dmac_config(hw);
5175 1.98 msaitoh }
5176 1.99 msaitoh } /* ixgbe_config_dmac */
5177 1.51 msaitoh
5178 1.99 msaitoh /************************************************************************
5179 1.99 msaitoh * ixgbe_enable_intr
5180 1.99 msaitoh ************************************************************************/
5181 1.98 msaitoh static void
5182 1.333 msaitoh ixgbe_enable_intr(struct ixgbe_softc *sc)
5183 1.98 msaitoh {
5184 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5185 1.333 msaitoh struct ix_queue *que = sc->queues;
5186 1.98 msaitoh u32 mask, fwsm;
5187 1.51 msaitoh
5188 1.98 msaitoh mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5189 1.45 msaitoh
5190 1.333 msaitoh switch (sc->hw.mac.type) {
5191 1.99 msaitoh case ixgbe_mac_82599EB:
5192 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5193 1.99 msaitoh /* Temperature sensor on some adapters */
5194 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0;
5195 1.99 msaitoh /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5196 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5197 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP2;
5198 1.99 msaitoh break;
5199 1.99 msaitoh case ixgbe_mac_X540:
5200 1.99 msaitoh /* Detect if Thermal Sensor is enabled */
5201 1.99 msaitoh fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5202 1.99 msaitoh if (fwsm & IXGBE_FWSM_TS_ENABLED)
5203 1.98 msaitoh mask |= IXGBE_EIMS_TS;
5204 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5205 1.99 msaitoh break;
5206 1.99 msaitoh case ixgbe_mac_X550:
5207 1.99 msaitoh /* MAC thermal sensor is automatically enabled */
5208 1.99 msaitoh mask |= IXGBE_EIMS_TS;
5209 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5210 1.99 msaitoh break;
5211 1.99 msaitoh case ixgbe_mac_X550EM_x:
5212 1.99 msaitoh case ixgbe_mac_X550EM_a:
5213 1.99 msaitoh /* Some devices use SDP0 for important information */
5214 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5215 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5216 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5217 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5218 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5219 1.99 msaitoh if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5220 1.99 msaitoh mask |= IXGBE_EICR_GPI_SDP0_X540;
5221 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5222 1.99 msaitoh break;
5223 1.99 msaitoh default:
5224 1.99 msaitoh break;
5225 1.1 dyoung }
5226 1.51 msaitoh
5227 1.99 msaitoh /* Enable Fan Failure detection */
5228 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
5229 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5230 1.99 msaitoh /* Enable SR-IOV */
5231 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5232 1.99 msaitoh mask |= IXGBE_EIMS_MAILBOX;
5233 1.99 msaitoh /* Enable Flow Director */
5234 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FDIR)
5235 1.99 msaitoh mask |= IXGBE_EIMS_FLOW_DIR;
5236 1.99 msaitoh
5237 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5238 1.64 msaitoh
5239 1.98 msaitoh /* With MSI-X we use auto clear */
5240 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) {
5241 1.270 msaitoh /*
5242 1.309 msaitoh 		 * We use auto clear only for the RTX_QUEUE interrupts, not
5243 1.309 msaitoh 		 * for the others (e.g. the link interrupt). Note that we
5244 1.309 msaitoh 		 * don't use the TCP_TIMER interrupt itself.
5245 1.270 msaitoh */
5246 1.270 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5247 1.98 msaitoh }
5248 1.1 dyoung
5249 1.98 msaitoh /*
5250 1.99 msaitoh 	 * Now enable all queues; this is done separately to
5251 1.99 msaitoh 	 * allow for handling the extended (beyond 32) MSI-X
5252 1.99 msaitoh 	 * vectors that can be used by the 82599.
5253 1.99 msaitoh */
5254 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5255 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
5256 1.1 dyoung
5257 1.98 msaitoh IXGBE_WRITE_FLUSH(hw);
5258 1.43 msaitoh
5259 1.99 msaitoh } /* ixgbe_enable_intr */
5260 1.1 dyoung
5261 1.99 msaitoh /************************************************************************
5262 1.139 knakahar * ixgbe_disable_intr_internal
5263 1.99 msaitoh ************************************************************************/
5264 1.44 msaitoh static void
5265 1.333 msaitoh ixgbe_disable_intr_internal(struct ixgbe_softc *sc, bool nestok)
5266 1.44 msaitoh {
5267 1.333 msaitoh struct ix_queue *que = sc->queues;
5268 1.127 knakahar
5269 1.127 knakahar /* disable interrupts other than queues */
5270 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5271 1.127 knakahar
5272 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0)
5273 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
5274 1.127 knakahar
5275 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5276 1.333 msaitoh ixgbe_disable_queue_internal(sc, que->msix, nestok);
5277 1.127 knakahar
5278 1.333 msaitoh IXGBE_WRITE_FLUSH(&sc->hw);
5279 1.99 msaitoh
5280 1.139 knakahar } /* ixgbe_disable_intr_internal */
5281 1.139 knakahar
5282 1.139 knakahar /************************************************************************
5283 1.139 knakahar * ixgbe_disable_intr
5284 1.139 knakahar ************************************************************************/
5285 1.139 knakahar static void
5286 1.333 msaitoh ixgbe_disable_intr(struct ixgbe_softc *sc)
5287 1.139 knakahar {
5288 1.139 knakahar
5289 1.333 msaitoh ixgbe_disable_intr_internal(sc, true);
5290 1.99 msaitoh } /* ixgbe_disable_intr */
5291 1.98 msaitoh
5292 1.99 msaitoh /************************************************************************
5293 1.139 knakahar * ixgbe_ensure_disabled_intr
5294 1.139 knakahar ************************************************************************/
5295 1.139 knakahar void
5296 1.333 msaitoh ixgbe_ensure_disabled_intr(struct ixgbe_softc *sc)
5297 1.139 knakahar {
5298 1.139 knakahar
5299 1.333 msaitoh ixgbe_disable_intr_internal(sc, false);
5300 1.139 knakahar } /* ixgbe_ensure_disabled_intr */
5301 1.139 knakahar
5302 1.139 knakahar /************************************************************************
5303 1.99 msaitoh * ixgbe_legacy_irq - Legacy Interrupt Service routine
5304 1.99 msaitoh ************************************************************************/
5305 1.98 msaitoh static int
5306 1.98 msaitoh ixgbe_legacy_irq(void *arg)
5307 1.1 dyoung {
5308 1.98 msaitoh struct ix_queue *que = arg;
5309 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
5310 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5311 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5312 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
5313 1.277 msaitoh u32 eicr;
5314 1.269 msaitoh u32 eims_orig;
5315 1.273 msaitoh u32 eims_enable = 0;
5316 1.273 msaitoh u32 eims_disable = 0;
5317 1.98 msaitoh
5318 1.269 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5319 1.269 msaitoh /*
5320 1.269 msaitoh * Silicon errata #26 on 82598. Disable all interrupts before reading
5321 1.269 msaitoh * EICR.
5322 1.269 msaitoh */
5323 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5324 1.98 msaitoh
5325 1.268 msaitoh /* Read and clear EICR */
5326 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5327 1.44 msaitoh
5328 1.99 msaitoh if (eicr == 0) {
5329 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.intzero, 1);
5330 1.269 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5331 1.98 msaitoh return 0;
5332 1.98 msaitoh }
5333 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.legint, 1);
5334 1.44 msaitoh
5335 1.272 msaitoh /* Queue (0) intr */
5336 1.308 msaitoh if (((ifp->if_flags & IFF_RUNNING) != 0) &&
5337 1.308 msaitoh (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5338 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
5339 1.272 msaitoh
5340 1.147 knakahar /*
5341 1.265 msaitoh 		 * Handle "que->txrx_use_workqueue" in the same way
5342 1.265 msaitoh 		 * as ixgbe_msix_que() does.
5343 1.147 knakahar */
5344 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
5345 1.147 knakahar
5346 1.98 msaitoh IXGBE_TX_LOCK(txr);
5347 1.98 msaitoh ixgbe_txeof(txr);
5348 1.99 msaitoh #ifdef notyet
5349 1.99 msaitoh if (!ixgbe_ring_empty(ifp, txr->br))
5350 1.99 msaitoh ixgbe_start_locked(ifp, txr);
5351 1.99 msaitoh #endif
5352 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
5353 1.271 msaitoh
5354 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
5355 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
5356 1.273 msaitoh /* Disable queue 0 interrupt */
5357 1.273 msaitoh eims_disable |= 1UL << 0;
5358 1.273 msaitoh } else
5359 1.317 msaitoh eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;
5360 1.44 msaitoh
5361 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
5362 1.233 msaitoh
5363 1.273 msaitoh /* Re-enable some interrupts */
5364 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5365 1.273 msaitoh (eims_orig & ~eims_disable) | eims_enable);
5366 1.99 msaitoh
5367 1.98 msaitoh return 1;
5368 1.99 msaitoh } /* ixgbe_legacy_irq */
5369 1.98 msaitoh
5370 1.99 msaitoh /************************************************************************
5371 1.119 msaitoh * ixgbe_free_pciintr_resources
5372 1.99 msaitoh ************************************************************************/
5373 1.98 msaitoh static void
5374 1.333 msaitoh ixgbe_free_pciintr_resources(struct ixgbe_softc *sc)
5375 1.44 msaitoh {
5376 1.333 msaitoh struct ix_queue *que = sc->queues;
5377 1.98 msaitoh int rid;
5378 1.44 msaitoh
5379 1.98 msaitoh /*
5380 1.99 msaitoh 	 * Release all MSI-X queue resources:
5381 1.99 msaitoh */
5382 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
5383 1.119 msaitoh if (que->res != NULL) {
5384 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[i]);
5385 1.333 msaitoh sc->osdep.ihs[i] = NULL;
5386 1.119 msaitoh }
5387 1.58 msaitoh }
5388 1.58 msaitoh
5389 1.98 msaitoh /* Clean the Legacy or Link interrupt last */
5390 1.333 msaitoh if (sc->vector) /* we are doing MSIX */
5391 1.333 msaitoh rid = sc->vector;
5392 1.98 msaitoh else
5393 1.98 msaitoh rid = 0;
5394 1.44 msaitoh
5395 1.333 msaitoh if (sc->osdep.ihs[rid] != NULL) {
5396 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[rid]);
5397 1.333 msaitoh sc->osdep.ihs[rid] = NULL;
5398 1.98 msaitoh }
5399 1.44 msaitoh
5400 1.333 msaitoh if (sc->osdep.intrs != NULL) {
5401 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
5402 1.333 msaitoh sc->osdep.nintrs);
5403 1.333 msaitoh sc->osdep.intrs = NULL;
5404 1.119 msaitoh }
5405 1.119 msaitoh } /* ixgbe_free_pciintr_resources */
5406 1.119 msaitoh
5407 1.119 msaitoh /************************************************************************
5408 1.119 msaitoh * ixgbe_free_pci_resources
5409 1.119 msaitoh ************************************************************************/
5410 1.119 msaitoh static void
5411 1.333 msaitoh ixgbe_free_pci_resources(struct ixgbe_softc *sc)
5412 1.119 msaitoh {
5413 1.119 msaitoh
5414 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
5415 1.44 msaitoh
5416 1.333 msaitoh if (sc->osdep.mem_size != 0) {
5417 1.333 msaitoh bus_space_unmap(sc->osdep.mem_bus_space_tag,
5418 1.333 msaitoh sc->osdep.mem_bus_space_handle,
5419 1.333 msaitoh sc->osdep.mem_size);
5420 1.44 msaitoh }
5421 1.99 msaitoh } /* ixgbe_free_pci_resources */
5422 1.44 msaitoh
5423 1.99 msaitoh /************************************************************************
5424 1.99 msaitoh * ixgbe_sysctl_flowcntl
5425 1.99 msaitoh *
5426 1.99 msaitoh * SYSCTL wrapper around setting Flow Control
5427 1.99 msaitoh ************************************************************************/
5428 1.98 msaitoh static int
5429 1.98 msaitoh ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5430 1.98 msaitoh {
5431 1.98 msaitoh struct sysctlnode node = *rnode;
5432 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5433 1.99 msaitoh int error, fc;
5434 1.82 msaitoh
5435 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5436 1.169 msaitoh return (EPERM);
5437 1.169 msaitoh
5438 1.333 msaitoh fc = sc->hw.fc.current_mode;
5439 1.98 msaitoh node.sysctl_data = &fc;
5440 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5441 1.98 msaitoh if (error != 0 || newp == NULL)
5442 1.98 msaitoh return error;
5443 1.82 msaitoh
5444 1.98 msaitoh /* Don't bother if it's not changed */
5445 1.333 msaitoh if (fc == sc->hw.fc.current_mode)
5446 1.98 msaitoh return (0);
5447 1.83 msaitoh
5448 1.333 msaitoh return ixgbe_set_flowcntl(sc, fc);
5449 1.99 msaitoh } /* ixgbe_sysctl_flowcntl */
5450 1.1 dyoung
5451 1.99 msaitoh /************************************************************************
5452 1.99 msaitoh * ixgbe_set_flowcntl - Set flow control
5453 1.99 msaitoh *
5454 1.99 msaitoh * Flow control values:
5455 1.99 msaitoh * 0 - off
5456 1.99 msaitoh * 1 - rx pause
5457 1.99 msaitoh * 2 - tx pause
5458 1.99 msaitoh * 3 - full
5459 1.99 msaitoh ************************************************************************/
5460 1.98 msaitoh static int
5461 1.333 msaitoh ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
5462 1.98 msaitoh {
5463 1.98 msaitoh switch (fc) {
5464 1.98 msaitoh case ixgbe_fc_rx_pause:
5465 1.98 msaitoh case ixgbe_fc_tx_pause:
5466 1.98 msaitoh case ixgbe_fc_full:
5467 1.333 msaitoh sc->hw.fc.requested_mode = fc;
5468 1.333 msaitoh if (sc->num_queues > 1)
5469 1.333 msaitoh ixgbe_disable_rx_drop(sc);
5470 1.98 msaitoh break;
5471 1.98 msaitoh case ixgbe_fc_none:
5472 1.333 msaitoh sc->hw.fc.requested_mode = ixgbe_fc_none;
5473 1.333 msaitoh if (sc->num_queues > 1)
5474 1.333 msaitoh ixgbe_enable_rx_drop(sc);
5475 1.98 msaitoh break;
5476 1.98 msaitoh default:
5477 1.98 msaitoh return (EINVAL);
5478 1.1 dyoung }
5479 1.99 msaitoh
5480 1.98 msaitoh #if 0 /* XXX NetBSD */
5481 1.98 msaitoh /* Don't autoneg if forcing a value */
5482 1.333 msaitoh sc->hw.fc.disable_fc_autoneg = TRUE;
5483 1.98 msaitoh #endif
5484 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5485 1.99 msaitoh
5486 1.98 msaitoh return (0);
5487 1.99 msaitoh } /* ixgbe_set_flowcntl */
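/*
 * Usage sketch for the flow control sysctl (the node name below is an
 * assumption for illustration; the actual node is created by the device
 * sysctl setup elsewhere in this file):
 *	sysctl -w hw.ixg0.fc=3	# request full (rx + tx) pause
 *	sysctl -w hw.ixg0.fc=0	# disable flow control
 * The values follow the table above: 0 - off, 1 - rx pause,
 * 2 - tx pause, 3 - full.
 */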
5488 1.1 dyoung
5489 1.99 msaitoh /************************************************************************
5490 1.99 msaitoh * ixgbe_enable_rx_drop
5491 1.99 msaitoh *
5492 1.99 msaitoh * Enable the hardware to drop packets when the buffer is
5493 1.99 msaitoh * full. This is useful with multiqueue, so that no single
5494 1.99 msaitoh * queue being full stalls the entire RX engine. We only
5495 1.99 msaitoh * enable this when Multiqueue is enabled AND Flow Control
5496 1.99 msaitoh * is disabled.
5497 1.99 msaitoh ************************************************************************/
5498 1.98 msaitoh static void
5499 1.333 msaitoh ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5500 1.98 msaitoh {
5501 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5502 1.186 msaitoh struct rx_ring *rxr;
5503 1.186 msaitoh u32 srrctl;
5504 1.1 dyoung
5505 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5506 1.333 msaitoh rxr = &sc->rx_rings[i];
5507 1.99 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5508 1.99 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
5509 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5510 1.98 msaitoh }
5511 1.99 msaitoh
5512 1.98 msaitoh /* enable drop for each vf */
5513 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5514 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5515 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5516 1.98 msaitoh IXGBE_QDE_ENABLE));
5517 1.98 msaitoh }
5518 1.99 msaitoh } /* ixgbe_enable_rx_drop */
5519 1.43 msaitoh
5520 1.99 msaitoh /************************************************************************
5521 1.99 msaitoh * ixgbe_disable_rx_drop
5522 1.99 msaitoh ************************************************************************/
5523 1.98 msaitoh static void
5524 1.333 msaitoh ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5525 1.98 msaitoh {
5526 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5527 1.186 msaitoh struct rx_ring *rxr;
5528 1.186 msaitoh u32 srrctl;
5529 1.43 msaitoh
5530 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5531 1.333 msaitoh rxr = &sc->rx_rings[i];
5532 1.186 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5533 1.186 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5534 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5535 1.98 msaitoh }
5536 1.99 msaitoh
5537 1.98 msaitoh /* disable drop for each vf */
5538 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5539 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5540 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5541 1.1 dyoung }
5542 1.99 msaitoh } /* ixgbe_disable_rx_drop */
5543 1.98 msaitoh
5544 1.99 msaitoh /************************************************************************
5545 1.99 msaitoh * ixgbe_sysctl_advertise
5546 1.99 msaitoh *
5547 1.99 msaitoh * SYSCTL wrapper around setting advertised speed
5548 1.99 msaitoh ************************************************************************/
5549 1.98 msaitoh static int
5550 1.98 msaitoh ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5551 1.98 msaitoh {
5552 1.99 msaitoh struct sysctlnode node = *rnode;
5553 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5554 1.186 msaitoh int error = 0, advertise;
5555 1.1 dyoung
5556 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5557 1.169 msaitoh return (EPERM);
5558 1.169 msaitoh
5559 1.333 msaitoh advertise = sc->advertise;
5560 1.98 msaitoh node.sysctl_data = &advertise;
5561 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5562 1.98 msaitoh if (error != 0 || newp == NULL)
5563 1.98 msaitoh return error;
5564 1.28 msaitoh
5565 1.333 msaitoh return ixgbe_set_advertise(sc, advertise);
5566 1.99 msaitoh } /* ixgbe_sysctl_advertise */
5567 1.1 dyoung
5568 1.99 msaitoh /************************************************************************
5569 1.99 msaitoh * ixgbe_set_advertise - Control advertised link speed
5570 1.99 msaitoh *
5571 1.99 msaitoh * Flags:
5572 1.103 msaitoh  * 0x00 - Default (all capable link speeds)
5573 1.296 msaitoh * 0x1 - advertise 100 Mb
5574 1.296 msaitoh * 0x2 - advertise 1G
5575 1.296 msaitoh * 0x4 - advertise 10G
5576 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5577 1.103 msaitoh * 0x10 - advertise 2.5G
5578 1.103 msaitoh * 0x20 - advertise 5G
5579 1.99 msaitoh ************************************************************************/
5580 1.98 msaitoh static int
5581 1.333 msaitoh ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5582 1.1 dyoung {
5583 1.186 msaitoh device_t dev;
5584 1.186 msaitoh struct ixgbe_hw *hw;
5585 1.99 msaitoh ixgbe_link_speed speed = 0;
5586 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5587 1.186 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
5588 1.186 msaitoh bool negotiate = FALSE;
5589 1.98 msaitoh
5590 1.98 msaitoh /* Checks to validate new value */
5591 1.333 msaitoh if (sc->advertise == advertise) /* no change */
5592 1.98 msaitoh return (0);
5593 1.98 msaitoh
5594 1.333 msaitoh dev = sc->dev;
5595 1.333 msaitoh hw = &sc->hw;
5596 1.98 msaitoh
5597 1.98 msaitoh /* No speed changes for backplane media */
5598 1.98 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
5599 1.98 msaitoh return (ENODEV);
5600 1.98 msaitoh
5601 1.98 msaitoh if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5602 1.98 msaitoh (hw->phy.multispeed_fiber))) {
5603 1.98 msaitoh device_printf(dev,
5604 1.98 msaitoh "Advertised speed can only be set on copper or "
5605 1.98 msaitoh "multispeed fiber media types.\n");
5606 1.98 msaitoh return (EINVAL);
5607 1.98 msaitoh }
5608 1.98 msaitoh
5609 1.259 msaitoh if (advertise < 0x0 || advertise > 0x3f) {
5610 1.319 msaitoh device_printf(dev, "Invalid advertised speed; "
5611 1.319 msaitoh "valid modes are 0x0 through 0x3f\n");
5612 1.98 msaitoh return (EINVAL);
5613 1.98 msaitoh }
5614 1.1 dyoung
5615 1.99 msaitoh if (hw->mac.ops.get_link_capabilities) {
5616 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5617 1.99 msaitoh &negotiate);
5618 1.99 msaitoh if (err != IXGBE_SUCCESS) {
5619 1.319 msaitoh device_printf(dev, "Unable to determine supported "
5620 1.319 msaitoh "advertise speeds\n");
5621 1.99 msaitoh return (ENODEV);
5622 1.99 msaitoh }
5623 1.99 msaitoh }
5624 1.99 msaitoh
5625 1.98 msaitoh /* Set new value and report new advertised mode */
5626 1.99 msaitoh if (advertise & 0x1) {
5627 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5628 1.319 msaitoh device_printf(dev, "Interface does not support 100Mb "
5629 1.319 msaitoh "advertised speed\n");
5630 1.98 msaitoh return (EINVAL);
5631 1.98 msaitoh }
5632 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
5633 1.99 msaitoh }
5634 1.99 msaitoh if (advertise & 0x2) {
5635 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5636 1.319 msaitoh device_printf(dev, "Interface does not support 1Gb "
5637 1.319 msaitoh "advertised speed\n");
5638 1.99 msaitoh return (EINVAL);
5639 1.99 msaitoh }
5640 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
5641 1.99 msaitoh }
5642 1.99 msaitoh if (advertise & 0x4) {
5643 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5644 1.319 msaitoh device_printf(dev, "Interface does not support 10Gb "
5645 1.319 msaitoh "advertised speed\n");
5646 1.99 msaitoh return (EINVAL);
5647 1.99 msaitoh }
5648 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
5649 1.99 msaitoh }
5650 1.99 msaitoh if (advertise & 0x8) {
5651 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5652 1.319 msaitoh device_printf(dev, "Interface does not support 10Mb "
5653 1.319 msaitoh "advertised speed\n");
5654 1.99 msaitoh return (EINVAL);
5655 1.99 msaitoh }
5656 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
5657 1.99 msaitoh }
5658 1.103 msaitoh if (advertise & 0x10) {
5659 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5660 1.319 msaitoh device_printf(dev, "Interface does not support 2.5Gb "
5661 1.319 msaitoh "advertised speed\n");
5662 1.103 msaitoh return (EINVAL);
5663 1.103 msaitoh }
5664 1.103 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5665 1.103 msaitoh }
5666 1.103 msaitoh if (advertise & 0x20) {
5667 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5668 1.319 msaitoh device_printf(dev, "Interface does not support 5Gb "
5669 1.319 msaitoh "advertised speed\n");
5670 1.103 msaitoh return (EINVAL);
5671 1.103 msaitoh }
5672 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
5673 1.103 msaitoh }
5674 1.99 msaitoh if (advertise == 0)
5675 1.99 msaitoh speed = link_caps; /* All capable link speed */
5676 1.1 dyoung
5677 1.98 msaitoh hw->mac.autotry_restart = TRUE;
5678 1.98 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
5679 1.333 msaitoh sc->advertise = advertise;
5680 1.1 dyoung
5681 1.99 msaitoh return (0);
5682 1.99 msaitoh } /* ixgbe_set_advertise */
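/*
 * Worked example (illustrative only): advertise = 0x6 requests 1Gb
 * (0x2) plus 10Gb (0x4) and nothing else, provided the PHY reports both
 * as capable; advertise = 0x0 falls back to every capable link speed,
 * as handled at the end of ixgbe_set_advertise() above.
 */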
5683 1.1 dyoung
5684 1.99 msaitoh /************************************************************************
5685 1.296 msaitoh * ixgbe_get_default_advertise - Get default advertised speed settings
5686 1.99 msaitoh *
5687 1.99 msaitoh * Formatted for sysctl usage.
5688 1.99 msaitoh * Flags:
5689 1.296 msaitoh * 0x1 - advertise 100 Mb
5690 1.296 msaitoh * 0x2 - advertise 1G
5691 1.296 msaitoh * 0x4 - advertise 10G
5692 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5693 1.103 msaitoh * 0x10 - advertise 2.5G
5694 1.103 msaitoh * 0x20 - advertise 5G
5695 1.99 msaitoh ************************************************************************/
5696 1.98 msaitoh static int
5697 1.333 msaitoh ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5698 1.1 dyoung {
5699 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5700 1.186 msaitoh int speed;
5701 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5702 1.186 msaitoh s32 err;
5703 1.186 msaitoh bool negotiate = FALSE;
5704 1.98 msaitoh
5705 1.99 msaitoh /*
5706 1.99 msaitoh 	 * Advertised speed means nothing unless the media is
5707 1.99 msaitoh 	 * copper or multi-speed fiber.
5708 1.99 msaitoh */
5709 1.99 msaitoh if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5710 1.99 msaitoh !(hw->phy.multispeed_fiber))
5711 1.99 msaitoh return (0);
5712 1.1 dyoung
5713 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5714 1.99 msaitoh if (err != IXGBE_SUCCESS)
5715 1.99 msaitoh return (0);
5716 1.1 dyoung
5717 1.99 msaitoh speed =
5718 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5719 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5720 1.103 msaitoh ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5721 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5722 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5723 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5724 1.99 msaitoh
5725 1.99 msaitoh return speed;
5726 1.296 msaitoh } /* ixgbe_get_default_advertise */
5727 1.99 msaitoh
5728 1.99 msaitoh /************************************************************************
5729 1.99 msaitoh * ixgbe_sysctl_dmac - Manage DMA Coalescing
5730 1.99 msaitoh *
5731 1.99 msaitoh * Control values:
5732 1.99 msaitoh * 0/1 - off / on (use default value of 1000)
5733 1.99 msaitoh *
5734 1.99 msaitoh * Legal timer values are:
5735 1.99 msaitoh * 50,100,250,500,1000,2000,5000,10000
5736 1.99 msaitoh *
5737 1.99 msaitoh * Turning off interrupt moderation will also turn this off.
5738 1.99 msaitoh ************************************************************************/
5739 1.1 dyoung static int
5740 1.98 msaitoh ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5741 1.1 dyoung {
5742 1.44 msaitoh struct sysctlnode node = *rnode;
5743 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5744 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5745 1.186 msaitoh int error;
5746 1.186 msaitoh int newval;
5747 1.1 dyoung
5748 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5749 1.169 msaitoh return (EPERM);
5750 1.169 msaitoh
5751 1.333 msaitoh newval = sc->dmac;
5752 1.98 msaitoh node.sysctl_data = &newval;
5753 1.22 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5754 1.98 msaitoh if ((error) || (newp == NULL))
5755 1.98 msaitoh return (error);
5756 1.98 msaitoh
5757 1.98 msaitoh switch (newval) {
5758 1.98 msaitoh case 0:
5759 1.98 msaitoh /* Disabled */
5760 1.333 msaitoh sc->dmac = 0;
5761 1.98 msaitoh break;
5762 1.98 msaitoh case 1:
5763 1.98 msaitoh /* Enable and use default */
5764 1.333 msaitoh sc->dmac = 1000;
5765 1.98 msaitoh break;
5766 1.98 msaitoh case 50:
5767 1.98 msaitoh case 100:
5768 1.98 msaitoh case 250:
5769 1.98 msaitoh case 500:
5770 1.98 msaitoh case 1000:
5771 1.98 msaitoh case 2000:
5772 1.98 msaitoh case 5000:
5773 1.98 msaitoh case 10000:
5774 1.98 msaitoh /* Legal values - allow */
5775 1.333 msaitoh sc->dmac = newval;
5776 1.98 msaitoh break;
5777 1.98 msaitoh default:
5778 1.98 msaitoh /* Do nothing, illegal value */
5779 1.98 msaitoh return (EINVAL);
5780 1.22 msaitoh }
5781 1.1 dyoung
5782 1.98 msaitoh /* Re-initialize hardware if it's already running */
5783 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING)
5784 1.302 riastrad if_init(ifp);
5785 1.1 dyoung
5786 1.98 msaitoh return (0);
5787 1.1 dyoung }
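/*
 * Usage sketch for the DMA coalescing sysctl (node name assumed for
 * illustration):
 *	sysctl -w hw.ixg0.dmac=1	# enable with the default value of 1000
 *	sysctl -w hw.ixg0.dmac=250	# enable with a timer value of 250
 *	sysctl -w hw.ixg0.dmac=0	# disable
 * Any value outside the legal list above is rejected with EINVAL.
 */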
5788 1.1 dyoung
5789 1.98 msaitoh #ifdef IXGBE_DEBUG
5790 1.99 msaitoh /************************************************************************
5791 1.99 msaitoh * ixgbe_sysctl_power_state
5792 1.99 msaitoh *
5793 1.99 msaitoh * Sysctl to test power states
5794 1.99 msaitoh * Values:
5795 1.99 msaitoh * 0 - set device to D0
5796 1.99 msaitoh * 3 - set device to D3
5797 1.99 msaitoh * (none) - get current device power state
5798 1.99 msaitoh ************************************************************************/
5799 1.98 msaitoh static int
5800 1.98 msaitoh ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5801 1.44 msaitoh {
5802 1.99 msaitoh #ifdef notyet
5803 1.98 msaitoh struct sysctlnode node = *rnode;
5804 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5805 1.333 msaitoh device_t dev = sc->dev;
5806 1.186 msaitoh int curr_ps, new_ps, error = 0;
5807 1.44 msaitoh
5808 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5809 1.169 msaitoh return (EPERM);
5810 1.169 msaitoh
5811 1.98 msaitoh curr_ps = new_ps = pci_get_powerstate(dev);
5812 1.44 msaitoh
5813 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5814 1.98 msaitoh if ((error) || (req->newp == NULL))
5815 1.98 msaitoh return (error);
5816 1.44 msaitoh
5817 1.98 msaitoh if (new_ps == curr_ps)
5818 1.98 msaitoh return (0);
5819 1.44 msaitoh
5820 1.98 msaitoh if (new_ps == 3 && curr_ps == 0)
5821 1.98 msaitoh error = DEVICE_SUSPEND(dev);
5822 1.98 msaitoh else if (new_ps == 0 && curr_ps == 3)
5823 1.98 msaitoh error = DEVICE_RESUME(dev);
5824 1.98 msaitoh else
5825 1.98 msaitoh return (EINVAL);
5826 1.44 msaitoh
5827 1.98 msaitoh device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5828 1.44 msaitoh
5829 1.98 msaitoh return (error);
5830 1.98 msaitoh #else
5831 1.98 msaitoh return 0;
5832 1.98 msaitoh #endif
5833 1.99 msaitoh } /* ixgbe_sysctl_power_state */
5834 1.98 msaitoh #endif
5835 1.99 msaitoh
5836 1.99 msaitoh /************************************************************************
5837 1.99 msaitoh * ixgbe_sysctl_wol_enable
5838 1.99 msaitoh *
5839 1.99 msaitoh * Sysctl to enable/disable the WoL capability,
5840 1.99 msaitoh * if supported by the adapter.
5841 1.99 msaitoh *
5842 1.99 msaitoh * Values:
5843 1.99 msaitoh * 0 - disabled
5844 1.99 msaitoh * 1 - enabled
5845 1.99 msaitoh ************************************************************************/
5846 1.98 msaitoh static int
5847 1.98 msaitoh ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5848 1.98 msaitoh {
5849 1.98 msaitoh struct sysctlnode node = *rnode;
5850 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5851 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5852 1.186 msaitoh bool new_wol_enabled;
5853 1.186 msaitoh int error = 0;
5854 1.44 msaitoh
5855 1.169 msaitoh /*
5856 1.169 msaitoh * It's not required to check recovery mode because this function never
5857 1.169 msaitoh * touches hardware.
5858 1.169 msaitoh */
5859 1.98 msaitoh new_wol_enabled = hw->wol_enabled;
5860 1.98 msaitoh node.sysctl_data = &new_wol_enabled;
5861 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5862 1.98 msaitoh if ((error) || (newp == NULL))
5863 1.98 msaitoh return (error);
5864 1.98 msaitoh if (new_wol_enabled == hw->wol_enabled)
5865 1.98 msaitoh return (0);
5866 1.44 msaitoh
5867 1.333 msaitoh if (new_wol_enabled && !sc->wol_support)
5868 1.98 msaitoh return (ENODEV);
5869 1.98 msaitoh else
5870 1.98 msaitoh hw->wol_enabled = new_wol_enabled;
5871 1.44 msaitoh
5872 1.98 msaitoh return (0);
5873 1.99 msaitoh } /* ixgbe_sysctl_wol_enable */
5874 1.48 msaitoh
5875 1.99 msaitoh /************************************************************************
5876 1.99 msaitoh * ixgbe_sysctl_wufc - Wake Up Filter Control
5877 1.99 msaitoh *
5878 1.99 msaitoh * Sysctl to enable/disable the types of packets that the
5879 1.99 msaitoh * adapter will wake up on upon receipt.
5880 1.99 msaitoh * Flags:
5881 1.99 msaitoh * 0x1 - Link Status Change
5882 1.99 msaitoh * 0x2 - Magic Packet
5883 1.99 msaitoh * 0x4 - Direct Exact
5884 1.99 msaitoh * 0x8 - Directed Multicast
5885 1.99 msaitoh * 0x10 - Broadcast
5886 1.99 msaitoh * 0x20 - ARP/IPv4 Request Packet
5887 1.99 msaitoh * 0x40 - Direct IPv4 Packet
5888 1.99 msaitoh * 0x80 - Direct IPv6 Packet
5889 1.98 msaitoh *
5890 1.99 msaitoh * Settings not listed above will cause the sysctl to return an error.
5891 1.99 msaitoh ************************************************************************/
5892 1.1 dyoung static int
5893 1.98 msaitoh ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5894 1.1 dyoung {
5895 1.98 msaitoh struct sysctlnode node = *rnode;
5896 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5897 1.98 msaitoh int error = 0;
5898 1.98 msaitoh u32 new_wufc;
5899 1.52 msaitoh
5900 1.169 msaitoh /*
5901 1.169 msaitoh * It's not required to check recovery mode because this function never
5902 1.169 msaitoh * touches hardware.
5903 1.169 msaitoh */
5904 1.333 msaitoh new_wufc = sc->wufc;
5905 1.98 msaitoh node.sysctl_data = &new_wufc;
5906 1.52 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5907 1.98 msaitoh if ((error) || (newp == NULL))
5908 1.98 msaitoh return (error);
5909 1.333 msaitoh if (new_wufc == sc->wufc)
5910 1.98 msaitoh return (0);
5911 1.98 msaitoh
5912 1.98 msaitoh if (new_wufc & 0xffffff00)
5913 1.98 msaitoh return (EINVAL);
5914 1.99 msaitoh
5915 1.99 msaitoh new_wufc &= 0xff;
5916 1.333 msaitoh new_wufc |= (0xffffff & sc->wufc);
5917 1.333 msaitoh sc->wufc = new_wufc;
5918 1.52 msaitoh
5919 1.98 msaitoh return (0);
5920 1.99 msaitoh } /* ixgbe_sysctl_wufc */
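/*
 * Worked example (illustrative only): writing 0x12 to the WUFC sysctl
 * requests wake on Magic Packet (0x2) plus Broadcast (0x10); any value
 * with bits above 0xff set is rejected with EINVAL, per the check above.
 */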
5921 1.52 msaitoh
5922 1.98 msaitoh #ifdef IXGBE_DEBUG
5923 1.99 msaitoh /************************************************************************
5924 1.99 msaitoh * ixgbe_sysctl_print_rss_config
5925 1.99 msaitoh ************************************************************************/
5926 1.52 msaitoh static int
5927 1.98 msaitoh ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5928 1.52 msaitoh {
5929 1.99 msaitoh #ifdef notyet
5930 1.99 msaitoh struct sysctlnode node = *rnode;
5931 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5932 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5933 1.333 msaitoh device_t dev = sc->dev;
5934 1.186 msaitoh struct sbuf *buf;
5935 1.186 msaitoh int error = 0, reta_size;
5936 1.186 msaitoh u32 reg;
5937 1.1 dyoung
5938 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5939 1.169 msaitoh return (EPERM);
5940 1.169 msaitoh
5941 1.98 msaitoh buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5942 1.98 msaitoh if (!buf) {
5943 1.98 msaitoh device_printf(dev, "Could not allocate sbuf for output.\n");
5944 1.98 msaitoh return (ENOMEM);
5945 1.98 msaitoh }
5946 1.52 msaitoh
5947 1.98 msaitoh // TODO: use sbufs to make a string to print out
5948 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
5949 1.333 msaitoh switch (sc->hw.mac.type) {
5950 1.98 msaitoh case ixgbe_mac_X550:
5951 1.98 msaitoh case ixgbe_mac_X550EM_x:
5952 1.99 msaitoh case ixgbe_mac_X550EM_a:
5953 1.98 msaitoh reta_size = 128;
5954 1.98 msaitoh break;
5955 1.98 msaitoh default:
5956 1.98 msaitoh reta_size = 32;
5957 1.98 msaitoh break;
5958 1.43 msaitoh }
5959 1.1 dyoung
5960 1.98 msaitoh /* Print out the redirection table */
5961 1.98 msaitoh sbuf_cat(buf, "\n");
5962 1.98 msaitoh for (int i = 0; i < reta_size; i++) {
5963 1.98 msaitoh if (i < 32) {
5964 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5965 1.98 msaitoh sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5966 1.98 msaitoh } else {
5967 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5968 1.98 msaitoh sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5969 1.98 msaitoh }
5970 1.28 msaitoh }
5971 1.1 dyoung
5972 1.98 msaitoh // TODO: print more config
5973 1.43 msaitoh
5974 1.98 msaitoh error = sbuf_finish(buf);
5975 1.98 msaitoh if (error)
5976 1.98 msaitoh device_printf(dev, "Error finishing sbuf: %d\n", error);
5977 1.1 dyoung
5978 1.98 msaitoh sbuf_delete(buf);
5979 1.99 msaitoh #endif
5980 1.98 msaitoh return (0);
5981 1.99 msaitoh } /* ixgbe_sysctl_print_rss_config */
5982 1.98 msaitoh #endif /* IXGBE_DEBUG */
5983 1.24 msaitoh
5984 1.99 msaitoh /************************************************************************
5985 1.99 msaitoh * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5986 1.99 msaitoh *
5987 1.99 msaitoh * For X552/X557-AT devices using an external PHY
5988 1.99 msaitoh ************************************************************************/
5989 1.44 msaitoh static int
5990 1.44 msaitoh ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5991 1.44 msaitoh {
5992 1.44 msaitoh struct sysctlnode node = *rnode;
5993 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5994 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5995 1.44 msaitoh int val;
5996 1.44 msaitoh u16 reg;
5997 1.44 msaitoh int error;
5998 1.44 msaitoh
5999 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6000 1.169 msaitoh return (EPERM);
6001 1.169 msaitoh
6002 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6003 1.325 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6004 1.333 msaitoh device_printf(sc->dev,
6005 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6006 1.44 msaitoh return (ENODEV);
6007 1.44 msaitoh }
6008 1.44 msaitoh
6009 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
6010 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6011 1.333 msaitoh device_printf(sc->dev,
6012 1.44 msaitoh "Error reading from PHY's current temperature register\n");
6013 1.44 msaitoh return (EAGAIN);
6014 1.44 msaitoh }
6015 1.44 msaitoh
6016 1.44 msaitoh node.sysctl_data = &val;
6017 1.44 msaitoh
6018 1.44 msaitoh /* Shift temp for output */
6019 1.44 msaitoh val = reg >> 8;
6020 1.44 msaitoh
6021 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6022 1.44 msaitoh if ((error) || (newp == NULL))
6023 1.44 msaitoh return (error);
6024 1.44 msaitoh
6025 1.44 msaitoh return (0);
6026 1.99 msaitoh } /* ixgbe_sysctl_phy_temp */
6027 1.44 msaitoh
6028 1.99 msaitoh /************************************************************************
6029 1.99 msaitoh * ixgbe_sysctl_phy_overtemp_occurred
6030 1.99 msaitoh *
6031 1.99 msaitoh * Reports (directly from the PHY) whether the current PHY
6032 1.99 msaitoh * temperature is over the overtemp threshold.
6033 1.99 msaitoh ************************************************************************/
6034 1.44 msaitoh static int
6035 1.44 msaitoh ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
6036 1.44 msaitoh {
6037 1.44 msaitoh struct sysctlnode node = *rnode;
6038 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6039 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6040 1.44 msaitoh int val, error;
6041 1.44 msaitoh u16 reg;
6042 1.44 msaitoh
6043 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6044 1.169 msaitoh return (EPERM);
6045 1.169 msaitoh
6046 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6047 1.344 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6048 1.333 msaitoh device_printf(sc->dev,
6049 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6050 1.44 msaitoh return (ENODEV);
6051 1.44 msaitoh }
6052 1.44 msaitoh
6053 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
6054 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6055 1.333 msaitoh device_printf(sc->dev,
6056 1.44 msaitoh "Error reading from PHY's temperature status register\n");
6057 1.44 msaitoh return (EAGAIN);
6058 1.44 msaitoh }
6059 1.44 msaitoh
6060 1.44 msaitoh node.sysctl_data = &val;
6061 1.44 msaitoh
6062 1.44 msaitoh 	/* Get the overtemp occurrence bit (bit 14 of the status register) */
6063 1.44 msaitoh val = !!(reg & 0x4000);
6064 1.44 msaitoh
6065 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6066 1.44 msaitoh if ((error) || (newp == NULL))
6067 1.44 msaitoh return (error);
6068 1.44 msaitoh
6069 1.44 msaitoh return (0);
6070 1.99 msaitoh } /* ixgbe_sysctl_phy_overtemp_occurred */
6071 1.99 msaitoh
6072 1.99 msaitoh /************************************************************************
6073 1.99 msaitoh * ixgbe_sysctl_eee_state
6074 1.99 msaitoh *
6075 1.99 msaitoh * Sysctl to set EEE power saving feature
6076 1.99 msaitoh * Values:
6077 1.99 msaitoh * 0 - disable EEE
6078 1.99 msaitoh * 1 - enable EEE
6079 1.99 msaitoh * (none) - get current device EEE state
6080 1.99 msaitoh ************************************************************************/
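/*
 * Example usage (the sysctl path is an assumption based on the handler
 * name; the node itself is created elsewhere in the driver):
 *
 *	sysctl -w hw.ixg0.eee_state=1	# enable EEE
 *	sysctl hw.ixg0.eee_state	# read the current state
 */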
6081 1.99 msaitoh static int
6082 1.99 msaitoh ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6083 1.99 msaitoh {
6084 1.99 msaitoh struct sysctlnode node = *rnode;
6085 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6086 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6087 1.333 msaitoh device_t dev = sc->dev;
6088 1.186 msaitoh int curr_eee, new_eee, error = 0;
6089 1.186 msaitoh s32 retval;
6090 1.99 msaitoh
6091 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6092 1.169 msaitoh return (EPERM);
6093 1.169 msaitoh
6094 1.333 msaitoh curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
6095 1.99 msaitoh node.sysctl_data = &new_eee;
6096 1.99 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6097 1.99 msaitoh if ((error) || (newp == NULL))
6098 1.99 msaitoh return (error);
6099 1.99 msaitoh
6100 1.99 msaitoh /* Nothing to do */
6101 1.99 msaitoh if (new_eee == curr_eee)
6102 1.99 msaitoh return (0);
6103 1.99 msaitoh
6104 1.99 msaitoh /* Not supported */
6105 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
6106 1.99 msaitoh return (EINVAL);
6107 1.99 msaitoh
6108 1.99 msaitoh /* Bounds checking */
6109 1.99 msaitoh if ((new_eee < 0) || (new_eee > 1))
6110 1.99 msaitoh return (EINVAL);
6111 1.99 msaitoh
6112 1.333 msaitoh retval = ixgbe_setup_eee(&sc->hw, new_eee);
6113 1.99 msaitoh if (retval) {
6114 1.99 msaitoh device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6115 1.99 msaitoh return (EINVAL);
6116 1.99 msaitoh }
6117 1.99 msaitoh
6118 1.99 msaitoh /* Restart auto-neg */
6119 1.302 riastrad if_init(ifp);
6120 1.99 msaitoh
6121 1.99 msaitoh device_printf(dev, "New EEE state: %d\n", new_eee);
6122 1.99 msaitoh
6123 1.99 msaitoh /* Cache new value */
6124 1.99 msaitoh if (new_eee)
6125 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6126 1.99 msaitoh else
6127 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_EEE;
6128 1.99 msaitoh
6129 1.99 msaitoh return (error);
6130 1.99 msaitoh } /* ixgbe_sysctl_eee_state */
6131 1.99 msaitoh
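/*
 * PRINTQS: print the given per-queue register (e.g. RDH, RDT) for
 * every queue of the device on a single line.
 */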
6132 1.333 msaitoh #define PRINTQS(sc, regname) \
6133 1.158 msaitoh do { \
6134 1.333 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \
6135 1.158 msaitoh int _i; \
6136 1.158 msaitoh \
6137 1.333 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \
6138 1.333 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \
6139 1.158 msaitoh printf((_i == 0) ? "\t" : " "); \
6140 1.158 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \
6141 1.158 msaitoh IXGBE_##regname(_i))); \
6142 1.158 msaitoh } \
6143 1.158 msaitoh printf("\n"); \
6144 1.158 msaitoh } while (0)
6145 1.158 msaitoh
6146 1.158 msaitoh /************************************************************************
6147 1.158 msaitoh * ixgbe_print_debug_info
6148 1.158 msaitoh *
6149 1.158 msaitoh  * Called from ixgbe_sysctl_debug() when the debug sysctl is set to 1.
6150 1.158 msaitoh * Provides a way to take a look at important statistics
6151 1.158 msaitoh * maintained by the driver and hardware.
6152 1.158 msaitoh ************************************************************************/
6153 1.158 msaitoh static void
6154 1.333 msaitoh ixgbe_print_debug_info(struct ixgbe_softc *sc)
6155 1.158 msaitoh {
6156 1.333 msaitoh device_t dev = sc->dev;
6157 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6158 1.158 msaitoh int table_size;
6159 1.158 msaitoh int i;
6160 1.158 msaitoh
6161 1.333 msaitoh switch (sc->hw.mac.type) {
6162 1.158 msaitoh case ixgbe_mac_X550:
6163 1.158 msaitoh case ixgbe_mac_X550EM_x:
6164 1.158 msaitoh case ixgbe_mac_X550EM_a:
6165 1.158 msaitoh table_size = 128;
6166 1.158 msaitoh break;
6167 1.158 msaitoh default:
6168 1.158 msaitoh table_size = 32;
6169 1.158 msaitoh break;
6170 1.158 msaitoh }
6171 1.185 msaitoh
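	/*
	 * Dump the RSS redirection table: entries 0-31 come from RETA,
	 * any remaining entries (X550 family) from ERETA.
	 */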
6172 1.158 msaitoh device_printf(dev, "[E]RETA:\n");
6173 1.158 msaitoh for (i = 0; i < table_size; i++) {
6174 1.158 msaitoh if (i < 32)
6175 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6176 1.158 msaitoh IXGBE_RETA(i)));
6177 1.158 msaitoh else
6178 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6179 1.158 msaitoh IXGBE_ERETA(i - 32)));
6180 1.158 msaitoh }
6181 1.158 msaitoh
6182 1.158 msaitoh device_printf(dev, "queue:");
6183 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6184 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6185 1.158 msaitoh printf("%8d", i);
6186 1.158 msaitoh }
6187 1.158 msaitoh printf("\n");
6188 1.333 msaitoh PRINTQS(sc, RDBAL);
6189 1.333 msaitoh PRINTQS(sc, RDBAH);
6190 1.333 msaitoh PRINTQS(sc, RDLEN);
6191 1.333 msaitoh PRINTQS(sc, SRRCTL);
6192 1.333 msaitoh PRINTQS(sc, RDH);
6193 1.333 msaitoh PRINTQS(sc, RDT);
6194 1.333 msaitoh PRINTQS(sc, RXDCTL);
6195 1.158 msaitoh
6196 1.158 msaitoh device_printf(dev, "RQSMR:");
6197 1.333 msaitoh for (i = 0; i < sc->num_queues / 4; i++) {
6198 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6199 1.158 msaitoh printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6200 1.158 msaitoh }
6201 1.158 msaitoh printf("\n");
6202 1.158 msaitoh
6203 1.158 msaitoh device_printf(dev, "disabled_count:");
6204 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6205 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6206 1.333 msaitoh printf("%8d", sc->queues[i].disabled_count);
6207 1.158 msaitoh }
6208 1.158 msaitoh printf("\n");
6209 1.185 msaitoh
6210 1.158 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6211 1.158 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
6212 1.158 msaitoh device_printf(dev, "EIMS_EX(0):\t%08x\n",
6213 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6214 1.158 msaitoh device_printf(dev, "EIMS_EX(1):\t%08x\n",
6215 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6216 1.158 msaitoh }
6217 1.265 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6218 1.265 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6219 1.158 msaitoh } /* ixgbe_print_debug_info */
6220 1.158 msaitoh
6221 1.158 msaitoh /************************************************************************
6222 1.158 msaitoh * ixgbe_sysctl_debug
6223 1.158 msaitoh ************************************************************************/
6224 1.158 msaitoh static int
6225 1.158 msaitoh ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6226 1.158 msaitoh {
6227 1.158 msaitoh struct sysctlnode node = *rnode;
6228 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6229 1.186 msaitoh int error, result = 0;
6230 1.158 msaitoh
6231 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6232 1.169 msaitoh return (EPERM);
6233 1.169 msaitoh
6234 1.158 msaitoh node.sysctl_data = &result;
6235 1.158 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6236 1.158 msaitoh
6237 1.158 msaitoh if (error || newp == NULL)
6238 1.158 msaitoh return error;
6239 1.158 msaitoh
6240 1.158 msaitoh if (result == 1)
6241 1.333 msaitoh ixgbe_print_debug_info(sc);
6242 1.158 msaitoh
6243 1.158 msaitoh return 0;
6244 1.158 msaitoh } /* ixgbe_sysctl_debug */
6245 1.158 msaitoh
6246 1.99 msaitoh /************************************************************************
6247 1.286 msaitoh * ixgbe_sysctl_rx_copy_len
6248 1.286 msaitoh ************************************************************************/
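/*
 * Assumed semantics (the copy itself happens in the Rx path, not here):
 * received frames no longer than rx_copy_len are copied into a fresh
 * mbuf so that the receive buffer can be recycled.  This handler only
 * enforces the 0..IXGBE_RX_COPY_LEN_MAX range.
 */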
6249 1.286 msaitoh static int
6250 1.286 msaitoh ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6251 1.286 msaitoh {
6252 1.286 msaitoh struct sysctlnode node = *rnode;
6253 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6254 1.286 msaitoh int error;
6255 1.333 msaitoh int result = sc->rx_copy_len;
6256 1.286 msaitoh
6257 1.286 msaitoh node.sysctl_data = &result;
6258 1.286 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6259 1.286 msaitoh
6260 1.286 msaitoh if (error || newp == NULL)
6261 1.286 msaitoh return error;
6262 1.286 msaitoh
6263 1.286 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6264 1.286 msaitoh return EINVAL;
6265 1.286 msaitoh
6266 1.333 msaitoh sc->rx_copy_len = result;
6267 1.286 msaitoh
6268 1.286 msaitoh return 0;
6269 1.286 msaitoh } /* ixgbe_sysctl_rx_copy_len */
6270 1.286 msaitoh
6271 1.286 msaitoh /************************************************************************
6272 1.313 msaitoh * ixgbe_sysctl_tx_process_limit
6273 1.313 msaitoh ************************************************************************/
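/*
 * Assumed semantics: bounds how many Tx descriptors are cleaned per
 * invocation of the Tx completion routine.  This handler only enforces
 * the 1..num_tx_desc range.
 */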
6274 1.313 msaitoh static int
6275 1.313 msaitoh ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6276 1.313 msaitoh {
6277 1.313 msaitoh struct sysctlnode node = *rnode;
6278 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6279 1.313 msaitoh int error;
6280 1.333 msaitoh int result = sc->tx_process_limit;
6281 1.313 msaitoh
6282 1.313 msaitoh node.sysctl_data = &result;
6283 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6284 1.313 msaitoh
6285 1.313 msaitoh if (error || newp == NULL)
6286 1.313 msaitoh return error;
6287 1.313 msaitoh
6288 1.333 msaitoh if ((result <= 0) || (result > sc->num_tx_desc))
6289 1.313 msaitoh return EINVAL;
6290 1.313 msaitoh
6291 1.333 msaitoh sc->tx_process_limit = result;
6292 1.313 msaitoh
6293 1.313 msaitoh return 0;
6294 1.313 msaitoh } /* ixgbe_sysctl_tx_process_limit */
6295 1.313 msaitoh
6296 1.313 msaitoh /************************************************************************
6297 1.313 msaitoh * ixgbe_sysctl_rx_process_limit
6298 1.313 msaitoh ************************************************************************/
6299 1.313 msaitoh static int
6300 1.313 msaitoh ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6301 1.313 msaitoh {
6302 1.313 msaitoh struct sysctlnode node = *rnode;
6303 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6304 1.313 msaitoh int error;
6305 1.333 msaitoh int result = sc->rx_process_limit;
6306 1.313 msaitoh
6307 1.313 msaitoh node.sysctl_data = &result;
6308 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6309 1.313 msaitoh
6310 1.313 msaitoh if (error || newp == NULL)
6311 1.313 msaitoh return error;
6312 1.313 msaitoh
6313 1.333 msaitoh if ((result <= 0) || (result > sc->num_rx_desc))
6314 1.313 msaitoh return EINVAL;
6315 1.313 msaitoh
6316 1.333 msaitoh sc->rx_process_limit = result;
6317 1.313 msaitoh
6318 1.313 msaitoh return 0;
6319 1.313 msaitoh } /* ixgbe_sysctl_rx_process_limit */
6320 1.313 msaitoh
6321 1.313 msaitoh /************************************************************************
6322 1.99 msaitoh * ixgbe_init_device_features
6323 1.99 msaitoh ************************************************************************/
6324 1.99 msaitoh static void
6325 1.333 msaitoh ixgbe_init_device_features(struct ixgbe_softc *sc)
6326 1.99 msaitoh {
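	/* Capabilities common to every supported MAC type */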
6327 1.333 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP
6328 1.186 msaitoh | IXGBE_FEATURE_RSS
6329 1.186 msaitoh | IXGBE_FEATURE_MSI
6330 1.186 msaitoh | IXGBE_FEATURE_MSIX
6331 1.186 msaitoh | IXGBE_FEATURE_LEGACY_IRQ
6332 1.186 msaitoh | IXGBE_FEATURE_LEGACY_TX;
6333 1.99 msaitoh
6334 1.99 msaitoh /* Set capabilities first... */
6335 1.333 msaitoh switch (sc->hw.mac.type) {
6336 1.99 msaitoh case ixgbe_mac_82598EB:
6337 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
6338 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6339 1.99 msaitoh break;
6340 1.99 msaitoh case ixgbe_mac_X540:
6341 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6342 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6343 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6344 1.333 msaitoh (sc->hw.bus.func == 0))
6345 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6346 1.99 msaitoh break;
6347 1.99 msaitoh case ixgbe_mac_X550:
6348 1.169 msaitoh /*
6349 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6350 1.169 msaitoh * NVM Image version.
6351 1.169 msaitoh */
6352 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6353 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6354 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6355 1.99 msaitoh break;
6356 1.99 msaitoh case ixgbe_mac_X550EM_x:
6357 1.169 msaitoh /*
6358 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6359 1.169 msaitoh * NVM Image version.
6360 1.169 msaitoh */
6361 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6362 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6363 1.99 msaitoh break;
6364 1.99 msaitoh case ixgbe_mac_X550EM_a:
6365 1.169 msaitoh /*
6366 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6367 1.169 msaitoh * NVM Image version.
6368 1.169 msaitoh */
6369 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6370 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6371 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6372 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6373 1.333 msaitoh (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6374 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6375 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_EEE;
6376 1.99 msaitoh }
6377 1.99 msaitoh break;
6378 1.99 msaitoh case ixgbe_mac_82599EB:
6379 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6380 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6381 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6382 1.333 msaitoh (sc->hw.bus.func == 0))
6383 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6384 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6385 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6386 1.99 msaitoh break;
6387 1.99 msaitoh default:
6388 1.99 msaitoh break;
6389 1.99 msaitoh }
6390 1.99 msaitoh
6391 1.99 msaitoh /* Enabled by default... */
6392 1.99 msaitoh /* Fan failure detection */
6393 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6394 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6395 1.99 msaitoh /* Netmap */
6396 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
6397 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP;
6398 1.99 msaitoh /* EEE */
6399 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
6400 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6401 1.99 msaitoh /* Thermal Sensor */
6402 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6403 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6404 1.169 msaitoh /*
6405 1.169 msaitoh * Recovery mode:
6406 1.169 msaitoh * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6407 1.169 msaitoh * NVM Image version.
6408 1.169 msaitoh */
6409 1.99 msaitoh
6410 1.99 msaitoh /* Enabled via global sysctl... */
6411 1.99 msaitoh /* Flow Director */
6412 1.99 msaitoh if (ixgbe_enable_fdir) {
6413 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FDIR)
6414 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FDIR;
6415 1.99 msaitoh else
6416 1.333 msaitoh device_printf(sc->dev, "Device does not support "
6417 1.320 msaitoh "Flow Director. Leaving disabled.");
6418 1.99 msaitoh }
6419 1.99 msaitoh /* Legacy (single queue) transmit */
6420 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6421 1.99 msaitoh ixgbe_enable_legacy_tx)
6422 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6423 1.99 msaitoh /*
6424 1.99 msaitoh 	 * Message Signaled Interrupts - Extended (MSI-X)
6425 1.99 msaitoh * Normal MSI is only enabled if MSI-X calls fail.
6426 1.99 msaitoh */
6427 1.99 msaitoh if (!ixgbe_enable_msix)
6428 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
6429 1.99 msaitoh /* Receive-Side Scaling (RSS) */
6430 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6431 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS;
6432 1.99 msaitoh
6433 1.99 msaitoh /* Disable features with unmet dependencies... */
6434 1.99 msaitoh /* No MSI-X */
6435 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
6436 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
6437 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6438 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
6439 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
6440 1.99 msaitoh }
6441 1.99 msaitoh } /* ixgbe_init_device_features */
6442 1.44 msaitoh
6443 1.99 msaitoh /************************************************************************
6444 1.99 msaitoh * ixgbe_probe - Device identification routine
6445 1.98 msaitoh *
6446 1.99 msaitoh  * Determines if the driver should be attached to the
6447 1.99 msaitoh  * adapter, based on its PCI vendor/device ID.
6448 1.98 msaitoh  *
6449 1.99 msaitoh  * return 1 on match, 0 otherwise
6450 1.99 msaitoh ************************************************************************/
6451 1.98 msaitoh static int
6452 1.98 msaitoh ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6453 1.98 msaitoh {
6454 1.98 msaitoh const struct pci_attach_args *pa = aux;
6455 1.98 msaitoh
6456 1.98 msaitoh return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6457 1.98 msaitoh }
6458 1.98 msaitoh
6459 1.159 maxv static const ixgbe_vendor_info_t *
6460 1.98 msaitoh ixgbe_lookup(const struct pci_attach_args *pa)
6461 1.98 msaitoh {
6462 1.159 maxv const ixgbe_vendor_info_t *ent;
6463 1.98 msaitoh pcireg_t subid;
6464 1.98 msaitoh
6465 1.98 msaitoh INIT_DEBUGOUT("ixgbe_lookup: begin");
6466 1.98 msaitoh
6467 1.98 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6468 1.98 msaitoh return NULL;
6469 1.98 msaitoh
6470 1.98 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6471 1.98 msaitoh
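	/*
	 * A subvendor or subdevice ID of 0 in the table acts as a
	 * wildcard and matches any subsystem ID.
	 */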
6472 1.98 msaitoh for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6473 1.99 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6474 1.99 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6475 1.99 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6476 1.99 msaitoh (ent->subvendor_id == 0)) &&
6477 1.99 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6478 1.99 msaitoh (ent->subdevice_id == 0))) {
6479 1.98 msaitoh return ent;
6480 1.98 msaitoh }
6481 1.98 msaitoh }
6482 1.98 msaitoh return NULL;
6483 1.98 msaitoh }
6484 1.98 msaitoh
6485 1.98 msaitoh static int
6486 1.98 msaitoh ixgbe_ifflags_cb(struct ethercom *ec)
6487 1.98 msaitoh {
6488 1.98 msaitoh struct ifnet *ifp = &ec->ec_if;
6489 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6490 1.210 msaitoh u_short change;
6491 1.210 msaitoh int rv = 0;
6492 1.98 msaitoh
6493 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6494 1.98 msaitoh
6495 1.333 msaitoh change = ifp->if_flags ^ sc->if_flags;
6496 1.98 msaitoh if (change != 0)
6497 1.333 msaitoh sc->if_flags = ifp->if_flags;
6498 1.98 msaitoh
6499 1.192 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6500 1.192 msaitoh rv = ENETRESET;
6501 1.192 msaitoh goto out;
6502 1.192 msaitoh } else if ((change & IFF_PROMISC) != 0)
6503 1.333 msaitoh ixgbe_set_rxfilter(sc);
6504 1.98 msaitoh
6505 1.193 msaitoh /* Check for ec_capenable. */
6506 1.333 msaitoh change = ec->ec_capenable ^ sc->ec_capenable;
6507 1.333 msaitoh sc->ec_capenable = ec->ec_capenable;
6508 1.193 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6509 1.193 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) {
6510 1.193 msaitoh rv = ENETRESET;
6511 1.193 msaitoh goto out;
6512 1.193 msaitoh }
6513 1.193 msaitoh
6514 1.193 msaitoh /*
6515 1.193 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU.
6516 1.193 msaitoh 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6517 1.193 msaitoh */
6518 1.193 msaitoh
6519 1.98 msaitoh /* Set up VLAN support and filter */
6520 1.193 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6521 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
6522 1.98 msaitoh
6523 1.192 msaitoh out:
6524 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6525 1.98 msaitoh
6526 1.192 msaitoh return rv;
6527 1.98 msaitoh }
6528 1.98 msaitoh
6529 1.99 msaitoh /************************************************************************
6530 1.99 msaitoh * ixgbe_ioctl - Ioctl entry point
6531 1.98 msaitoh *
6532 1.99 msaitoh * Called when the user wants to configure the interface.
6533 1.98 msaitoh *
6534 1.99 msaitoh * return 0 on success, positive on failure
6535 1.99 msaitoh ************************************************************************/
6536 1.98 msaitoh static int
6537 1.232 msaitoh ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6538 1.98 msaitoh {
6539 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6540 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6541 1.98 msaitoh struct ifcapreq *ifcr = data;
6542 1.98 msaitoh struct ifreq *ifr = data;
6543 1.186 msaitoh int error = 0;
6544 1.98 msaitoh int l4csum_en;
6545 1.185 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6546 1.185 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6547 1.98 msaitoh
6548 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6549 1.169 msaitoh return (EPERM);
6550 1.169 msaitoh
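	/*
	 * The first switch only traces the request via IOCTL_DEBUGOUT;
	 * the actual processing is done in the second switch below.
	 */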
6551 1.98 msaitoh switch (command) {
6552 1.98 msaitoh case SIOCSIFFLAGS:
6553 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6554 1.98 msaitoh break;
6555 1.98 msaitoh case SIOCADDMULTI:
6556 1.98 msaitoh case SIOCDELMULTI:
6557 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6558 1.98 msaitoh break;
6559 1.98 msaitoh case SIOCSIFMEDIA:
6560 1.98 msaitoh case SIOCGIFMEDIA:
6561 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6562 1.98 msaitoh break;
6563 1.98 msaitoh case SIOCSIFCAP:
6564 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6565 1.98 msaitoh break;
6566 1.98 msaitoh case SIOCSIFMTU:
6567 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6568 1.98 msaitoh break;
6569 1.98 msaitoh #ifdef __NetBSD__
6570 1.98 msaitoh case SIOCINITIFADDR:
6571 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6572 1.98 msaitoh break;
6573 1.98 msaitoh case SIOCGIFFLAGS:
6574 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6575 1.98 msaitoh break;
6576 1.98 msaitoh case SIOCGIFAFLAG_IN:
6577 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6578 1.98 msaitoh break;
6579 1.98 msaitoh case SIOCGIFADDR:
6580 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6581 1.98 msaitoh break;
6582 1.98 msaitoh case SIOCGIFMTU:
6583 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6584 1.98 msaitoh break;
6585 1.98 msaitoh case SIOCGIFCAP:
6586 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6587 1.98 msaitoh break;
6588 1.98 msaitoh case SIOCGETHERCAP:
6589 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6590 1.98 msaitoh break;
6591 1.98 msaitoh case SIOCGLIFADDR:
6592 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6593 1.98 msaitoh break;
6594 1.98 msaitoh case SIOCZIFDATA:
6595 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6596 1.98 msaitoh hw->mac.ops.clear_hw_cntrs(hw);
6597 1.333 msaitoh ixgbe_clear_evcnt(sc);
6598 1.98 msaitoh break;
6599 1.98 msaitoh case SIOCAIFADDR:
6600 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6601 1.98 msaitoh break;
6602 1.98 msaitoh #endif
6603 1.98 msaitoh default:
6604 1.98 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6605 1.98 msaitoh break;
6606 1.98 msaitoh }
6607 1.24 msaitoh
6608 1.98 msaitoh switch (command) {
6609 1.98 msaitoh case SIOCGI2C:
6610 1.98 msaitoh {
6611 1.98 msaitoh struct ixgbe_i2c_req i2c;
6612 1.24 msaitoh
6613 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6614 1.98 msaitoh error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6615 1.98 msaitoh if (error != 0)
6616 1.98 msaitoh break;
6617 1.98 msaitoh if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6618 1.98 msaitoh error = EINVAL;
6619 1.98 msaitoh break;
6620 1.98 msaitoh }
6621 1.98 msaitoh if (i2c.len > sizeof(i2c.data)) {
6622 1.98 msaitoh error = EINVAL;
6623 1.98 msaitoh break;
6624 1.98 msaitoh }
6625 1.24 msaitoh
6626 1.98 msaitoh hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6627 1.98 msaitoh i2c.dev_addr, i2c.data);
6628 1.98 msaitoh error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6629 1.98 msaitoh break;
6630 1.98 msaitoh }
6631 1.98 msaitoh case SIOCSIFCAP:
6632 1.98 msaitoh /* Layer-4 Rx checksum offload has to be turned on and
6633 1.98 msaitoh * off as a unit.
6634 1.98 msaitoh */
6635 1.98 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum;
6636 1.98 msaitoh if (l4csum_en != l4csum && l4csum_en != 0)
6637 1.98 msaitoh return EINVAL;
6638 1.98 msaitoh /*FALLTHROUGH*/
6639 1.98 msaitoh case SIOCADDMULTI:
6640 1.98 msaitoh case SIOCDELMULTI:
6641 1.98 msaitoh case SIOCSIFFLAGS:
6642 1.98 msaitoh case SIOCSIFMTU:
6643 1.98 msaitoh default:
6644 1.98 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6645 1.98 msaitoh return error;
6646 1.98 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
6647 1.98 msaitoh ;
6648 1.98 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6649 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6650 1.135 msaitoh if ((ifp->if_flags & IFF_RUNNING) != 0)
6651 1.333 msaitoh ixgbe_init_locked(sc);
6652 1.333 msaitoh ixgbe_recalculate_max_frame(sc);
6653 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6654 1.98 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6655 1.98 msaitoh /*
6656 1.98 msaitoh * Multicast list has changed; set the hardware filter
6657 1.98 msaitoh * accordingly.
6658 1.98 msaitoh */
6659 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6660 1.333 msaitoh ixgbe_disable_intr(sc);
6661 1.333 msaitoh ixgbe_set_rxfilter(sc);
6662 1.333 msaitoh ixgbe_enable_intr(sc);
6663 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6664 1.98 msaitoh }
6665 1.98 msaitoh return 0;
6666 1.24 msaitoh }
6667 1.24 msaitoh
6668 1.98 msaitoh return error;
6669 1.99 msaitoh } /* ixgbe_ioctl */
6670 1.99 msaitoh
6671 1.99 msaitoh /************************************************************************
6672 1.99 msaitoh * ixgbe_check_fan_failure
6673 1.99 msaitoh ************************************************************************/
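/*
 * In interrupt context the failure is signalled by the GPI SDP1 bit of
 * the interrupt cause register passed in 'reg'; otherwise the SDP1 bit
 * of the ESDP register is checked.  Returns IXGBE_SUCCESS when no fan
 * failure is indicated, IXGBE_ERR_FAN_FAILURE otherwise.
 */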
6674 1.274 msaitoh static int
6675 1.333 msaitoh ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
6676 1.99 msaitoh {
6677 1.99 msaitoh u32 mask;
6678 1.99 msaitoh
6679 1.333 msaitoh mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
6680 1.99 msaitoh IXGBE_ESDP_SDP1;
6681 1.26 msaitoh
6682 1.312 msaitoh if ((reg & mask) == 0)
6683 1.312 msaitoh return IXGBE_SUCCESS;
6684 1.312 msaitoh
6685 1.312 msaitoh /*
6686 1.312 msaitoh 	 * Use ratecheck() just in case interrupts occur frequently.
6687 1.312 msaitoh 	 * When an EXPX9501AT's fan stopped, the interrupt occurred only
6688 1.312 msaitoh 	 * once, a red LED on the board turned on, and the link never came
6689 1.312 msaitoh 	 * up again until power off.
6690 1.312 msaitoh */
6691 1.333 msaitoh if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl))
6692 1.333 msaitoh device_printf(sc->dev,
6693 1.280 msaitoh "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6694 1.274 msaitoh
6695 1.312 msaitoh return IXGBE_ERR_FAN_FAILURE;
6696 1.99 msaitoh } /* ixgbe_check_fan_failure */
6697 1.99 msaitoh
6698 1.99 msaitoh /************************************************************************
6699 1.99 msaitoh * ixgbe_handle_que
6700 1.99 msaitoh ************************************************************************/
6701 1.98 msaitoh static void
6702 1.98 msaitoh ixgbe_handle_que(void *context)
6703 1.44 msaitoh {
6704 1.98 msaitoh struct ix_queue *que = context;
6705 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
6706 1.186 msaitoh struct tx_ring *txr = que->txr;
6707 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6708 1.121 msaitoh bool more = false;
6709 1.44 msaitoh
6710 1.305 msaitoh IXGBE_EVC_ADD(&que->handleq, 1);
6711 1.44 msaitoh
6712 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) {
6713 1.98 msaitoh IXGBE_TX_LOCK(txr);
6714 1.323 msaitoh more = ixgbe_txeof(txr);
6715 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
6716 1.99 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6717 1.99 msaitoh ixgbe_mq_start_locked(ifp, txr);
6718 1.98 msaitoh /* Only for queue 0 */
6719 1.99 msaitoh /* NetBSD still needs this for CBQ */
6720 1.333 msaitoh if ((&sc->queues[0] == que)
6721 1.99 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6722 1.99 msaitoh ixgbe_legacy_start_locked(ifp, txr);
6723 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
6724 1.323 msaitoh more |= ixgbe_rxeof(que);
6725 1.44 msaitoh }
6726 1.44 msaitoh
6727 1.128 knakahar if (more) {
6728 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
6729 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
6730 1.128 knakahar } else if (que->res != NULL) {
6731 1.265 msaitoh /* MSIX: Re-enable this interrupt */
6732 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
6733 1.265 msaitoh } else {
6734 1.265 msaitoh /* INTx or MSI */
6735 1.333 msaitoh ixgbe_enable_queue(sc, 0);
6736 1.265 msaitoh }
6737 1.99 msaitoh
6738 1.98 msaitoh return;
6739 1.99 msaitoh } /* ixgbe_handle_que */
6740 1.44 msaitoh
6741 1.99 msaitoh /************************************************************************
6742 1.128 knakahar * ixgbe_handle_que_work
6743 1.128 knakahar ************************************************************************/
6744 1.128 knakahar static void
6745 1.128 knakahar ixgbe_handle_que_work(struct work *wk, void *context)
6746 1.128 knakahar {
6747 1.128 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6748 1.128 knakahar
6749 1.128 knakahar /*
6750 1.128 knakahar * "enqueued flag" is not required here.
6751 1.128 knakahar * See ixgbe_msix_que().
6752 1.128 knakahar */
6753 1.128 knakahar ixgbe_handle_que(que);
6754 1.128 knakahar }
6755 1.128 knakahar
6756 1.128 knakahar /************************************************************************
6757 1.99 msaitoh * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6758 1.99 msaitoh ************************************************************************/
6759 1.48 msaitoh static int
6760 1.333 msaitoh ixgbe_allocate_legacy(struct ixgbe_softc *sc,
6761 1.98 msaitoh const struct pci_attach_args *pa)
6762 1.48 msaitoh {
6763 1.333 msaitoh device_t dev = sc->dev;
6764 1.333 msaitoh struct ix_queue *que = sc->queues;
6765 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
6766 1.98 msaitoh int counts[PCI_INTR_TYPE_SIZE];
6767 1.98 msaitoh pci_intr_type_t intr_type, max_type;
6768 1.186 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6769 1.206 knakahar char wqname[MAXCOMLEN];
6770 1.98 msaitoh const char *intrstr = NULL;
6771 1.206 knakahar int defertx_error = 0, error;
6772 1.185 msaitoh
6773 1.99 msaitoh /* We allocate a single interrupt resource */
6774 1.98 msaitoh max_type = PCI_INTR_TYPE_MSI;
6775 1.98 msaitoh counts[PCI_INTR_TYPE_MSIX] = 0;
6776 1.99 msaitoh counts[PCI_INTR_TYPE_MSI] =
6777 1.333 msaitoh (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6778 1.118 msaitoh /* Check not feat_en but feat_cap to fallback to INTx */
6779 1.99 msaitoh counts[PCI_INTR_TYPE_INTX] =
6780 1.333 msaitoh (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6781 1.48 msaitoh
6782 1.98 msaitoh alloc_retry:
6783 1.333 msaitoh if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) {
6784 1.98 msaitoh aprint_error_dev(dev, "couldn't alloc interrupt\n");
6785 1.98 msaitoh return ENXIO;
6786 1.98 msaitoh }
6787 1.333 msaitoh sc->osdep.nintrs = 1;
6788 1.333 msaitoh intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0],
6789 1.98 msaitoh intrbuf, sizeof(intrbuf));
6790 1.333 msaitoh sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc,
6791 1.333 msaitoh sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6792 1.98 msaitoh device_xname(dev));
6793 1.333 msaitoh intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]);
6794 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6795 1.98 msaitoh 		aprint_error_dev(dev, "unable to establish %s\n",
6796 1.98 msaitoh (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6797 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6798 1.333 msaitoh sc->osdep.intrs = NULL;
6799 1.98 msaitoh switch (intr_type) {
6800 1.98 msaitoh case PCI_INTR_TYPE_MSI:
6801 1.98 msaitoh /* The next try is for INTx: Disable MSI */
6802 1.98 msaitoh max_type = PCI_INTR_TYPE_INTX;
6803 1.98 msaitoh counts[PCI_INTR_TYPE_INTX] = 1;
6804 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6805 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6806 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6807 1.118 msaitoh goto alloc_retry;
6808 1.118 msaitoh } else
6809 1.118 msaitoh break;
6810 1.98 msaitoh case PCI_INTR_TYPE_INTX:
6811 1.98 msaitoh default:
6812 1.98 msaitoh /* See below */
6813 1.98 msaitoh break;
6814 1.98 msaitoh }
6815 1.98 msaitoh }
6816 1.119 msaitoh if (intr_type == PCI_INTR_TYPE_INTX) {
6817 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6818 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6819 1.119 msaitoh }
6820 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6821 1.98 msaitoh aprint_error_dev(dev,
6822 1.98 msaitoh "couldn't establish interrupt%s%s\n",
6823 1.98 msaitoh intrstr ? " at " : "", intrstr ? intrstr : "");
6824 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6825 1.333 msaitoh sc->osdep.intrs = NULL;
6826 1.98 msaitoh return ENXIO;
6827 1.98 msaitoh }
6828 1.98 msaitoh aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6829 1.98 msaitoh /*
6830 1.98 msaitoh * Try allocating a fast interrupt and the associated deferred
6831 1.98 msaitoh * processing contexts.
6832 1.98 msaitoh */
6833 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6834 1.99 msaitoh txr->txr_si =
6835 1.229 msaitoh softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6836 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6837 1.206 knakahar
6838 1.280 msaitoh snprintf(wqname, sizeof(wqname), "%sdeferTx",
6839 1.280 msaitoh device_xname(dev));
6840 1.333 msaitoh defertx_error = workqueue_create(&sc->txr_wq, wqname,
6841 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI,
6842 1.206 knakahar IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6843 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6844 1.206 knakahar }
6845 1.229 msaitoh que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6846 1.98 msaitoh ixgbe_handle_que, que);
6847 1.206 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6848 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
6849 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
6850 1.206 knakahar IXGBE_WORKQUEUE_FLAGS);
6851 1.48 msaitoh
6852 1.333 msaitoh if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
6853 1.206 knakahar && ((txr->txr_si == NULL) || defertx_error != 0))
6854 1.206 knakahar || (que->que_si == NULL) || error != 0) {
6855 1.98 msaitoh aprint_error_dev(dev,
6856 1.185 msaitoh "could not establish software interrupts\n");
6857 1.99 msaitoh
6858 1.98 msaitoh return ENXIO;
6859 1.98 msaitoh }
6860 1.98 msaitoh /* For simplicity in the handlers */
6861 1.333 msaitoh sc->active_queues = IXGBE_EIMS_ENABLE_MASK;
6862 1.44 msaitoh
6863 1.44 msaitoh return (0);
6864 1.99 msaitoh } /* ixgbe_allocate_legacy */
6865 1.44 msaitoh
6866 1.99 msaitoh /************************************************************************
6867 1.99 msaitoh * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6868 1.99 msaitoh ************************************************************************/
6869 1.44 msaitoh static int
6870 1.333 msaitoh ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
6871 1.44 msaitoh {
6872 1.333 msaitoh device_t dev = sc->dev;
6873 1.341 msaitoh struct ix_queue *que = sc->queues;
6874 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
6875 1.98 msaitoh pci_chipset_tag_t pc;
6876 1.98 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6877 1.98 msaitoh char intr_xname[32];
6878 1.128 knakahar char wqname[MAXCOMLEN];
6879 1.98 msaitoh const char *intrstr = NULL;
6880 1.186 msaitoh int error, vector = 0;
6881 1.98 msaitoh int cpu_id = 0;
6882 1.98 msaitoh kcpuset_t *affinity;
6883 1.99 msaitoh #ifdef RSS
6884 1.186 msaitoh unsigned int rss_buckets = 0;
6885 1.99 msaitoh kcpuset_t cpu_mask;
6886 1.98 msaitoh #endif
6887 1.98 msaitoh
6888 1.333 msaitoh pc = sc->osdep.pc;
6889 1.98 msaitoh #ifdef RSS
6890 1.98 msaitoh /*
6891 1.98 msaitoh * If we're doing RSS, the number of queues needs to
6892 1.98 msaitoh * match the number of RSS buckets that are configured.
6893 1.98 msaitoh *
6894 1.98 msaitoh * + If there's more queues than RSS buckets, we'll end
6895 1.98 msaitoh * up with queues that get no traffic.
6896 1.98 msaitoh *
6897 1.98 msaitoh * + If there's more RSS buckets than queues, we'll end
6898 1.98 msaitoh * up having multiple RSS buckets map to the same queue,
6899 1.98 msaitoh * so there'll be some contention.
6900 1.98 msaitoh */
6901 1.99 msaitoh rss_buckets = rss_getnumbuckets();
6902 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
6903 1.333 msaitoh (sc->num_queues != rss_buckets)) {
6904 1.98 msaitoh device_printf(dev,
6905 1.98 msaitoh "%s: number of queues (%d) != number of RSS buckets (%d)"
6906 1.98 msaitoh "; performance will be impacted.\n",
6907 1.333 msaitoh __func__, sc->num_queues, rss_buckets);
6908 1.98 msaitoh }
6909 1.98 msaitoh #endif
6910 1.98 msaitoh
6911 1.333 msaitoh sc->osdep.nintrs = sc->num_queues + 1;
6912 1.333 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
6913 1.333 msaitoh sc->osdep.nintrs) != 0) {
6914 1.98 msaitoh aprint_error_dev(dev,
6915 1.98 msaitoh "failed to allocate MSI-X interrupt\n");
6916 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
6917 1.98 msaitoh return (ENXIO);
6918 1.98 msaitoh }
6919 1.98 msaitoh
6920 1.98 msaitoh kcpuset_create(&affinity, false);
6921 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6922 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6923 1.98 msaitoh device_xname(dev), i);
6924 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
6925 1.98 msaitoh sizeof(intrbuf));
6926 1.98 msaitoh #ifdef IXGBE_MPSAFE
6927 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
6928 1.98 msaitoh true);
6929 1.98 msaitoh #endif
6930 1.98 msaitoh /* Set the handler function */
6931 1.333 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6932 1.333 msaitoh sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6933 1.98 msaitoh intr_xname);
6934 1.98 msaitoh if (que->res == NULL) {
6935 1.98 msaitoh aprint_error_dev(dev,
6936 1.98 msaitoh "Failed to register QUE handler\n");
6937 1.119 msaitoh error = ENXIO;
6938 1.119 msaitoh goto err_out;
6939 1.98 msaitoh }
6940 1.98 msaitoh que->msix = vector;
6941 1.333 msaitoh sc->active_queues |= 1ULL << que->msix;
6942 1.99 msaitoh
6943 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
6944 1.98 msaitoh #ifdef RSS
6945 1.99 msaitoh /*
6946 1.99 msaitoh * The queue ID is used as the RSS layer bucket ID.
6947 1.99 msaitoh * We look up the queue ID -> RSS CPU ID and select
6948 1.99 msaitoh * that.
6949 1.99 msaitoh */
6950 1.99 msaitoh cpu_id = rss_getcpu(i % rss_getnumbuckets());
6951 1.99 msaitoh CPU_SETOF(cpu_id, &cpu_mask);
6952 1.98 msaitoh #endif
6953 1.99 msaitoh } else {
6954 1.99 msaitoh /*
6955 1.99 msaitoh * Bind the MSI-X vector, and thus the
6956 1.99 msaitoh * rings to the corresponding CPU.
6957 1.99 msaitoh *
6958 1.99 msaitoh * This just happens to match the default RSS
6959 1.99 msaitoh * round-robin bucket -> queue -> CPU allocation.
6960 1.99 msaitoh */
6961 1.333 msaitoh if (sc->num_queues > 1)
6962 1.99 msaitoh cpu_id = i;
6963 1.99 msaitoh }
6964 1.98 msaitoh /* Round-robin affinity */
6965 1.98 msaitoh kcpuset_zero(affinity);
6966 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
6967 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity,
6968 1.98 msaitoh NULL);
6969 1.98 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6970 1.98 msaitoh intrstr);
6971 1.98 msaitoh if (error == 0) {
6972 1.98 msaitoh #if 1 /* def IXGBE_DEBUG */
6973 1.98 msaitoh #ifdef RSS
6974 1.322 skrll aprint_normal(", bound RSS bucket %d to CPU %d", i,
6975 1.99 msaitoh cpu_id % ncpu);
6976 1.98 msaitoh #else
6977 1.99 msaitoh aprint_normal(", bound queue %d to cpu %d", i,
6978 1.99 msaitoh cpu_id % ncpu);
6979 1.98 msaitoh #endif
6980 1.98 msaitoh #endif /* IXGBE_DEBUG */
6981 1.98 msaitoh }
6982 1.98 msaitoh aprint_normal("\n");
6983 1.99 msaitoh
6984 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6985 1.99 msaitoh txr->txr_si = softint_establish(
6986 1.229 msaitoh SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6987 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6988 1.119 msaitoh if (txr->txr_si == NULL) {
6989 1.119 msaitoh aprint_error_dev(dev,
6990 1.119 msaitoh "couldn't establish software interrupt\n");
6991 1.119 msaitoh error = ENXIO;
6992 1.119 msaitoh goto err_out;
6993 1.119 msaitoh }
6994 1.119 msaitoh }
6995 1.98 msaitoh que->que_si
6996 1.229 msaitoh = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6997 1.98 msaitoh ixgbe_handle_que, que);
6998 1.98 msaitoh if (que->que_si == NULL) {
6999 1.98 msaitoh aprint_error_dev(dev,
7000 1.185 msaitoh "couldn't establish software interrupt\n");
7001 1.119 msaitoh error = ENXIO;
7002 1.119 msaitoh goto err_out;
7003 1.98 msaitoh }
7004 1.98 msaitoh }
7005 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
7006 1.333 msaitoh error = workqueue_create(&sc->txr_wq, wqname,
7007 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7008 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7009 1.128 knakahar if (error) {
7010 1.280 msaitoh aprint_error_dev(dev,
7011 1.280 msaitoh "couldn't create workqueue for deferred Tx\n");
7012 1.128 knakahar goto err_out;
7013 1.128 knakahar }
7014 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
7015 1.128 knakahar
7016 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
7017 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
7018 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7019 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7020 1.128 knakahar if (error) {
7021 1.128 knakahar aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
7022 1.128 knakahar goto err_out;
7023 1.128 knakahar }
7024 1.44 msaitoh
7025 1.98 msaitoh /* and Link */
7026 1.98 msaitoh cpu_id++;
7027 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
7028 1.333 msaitoh sc->vector = vector;
7029 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
7030 1.98 msaitoh sizeof(intrbuf));
7031 1.98 msaitoh #ifdef IXGBE_MPSAFE
7032 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
7033 1.98 msaitoh true);
7034 1.98 msaitoh #endif
7035 1.98 msaitoh /* Set the link handler function */
7036 1.333 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
7037 1.333 msaitoh sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc,
7038 1.98 msaitoh intr_xname);
7039 1.333 msaitoh if (sc->osdep.ihs[vector] == NULL) {
7040 1.98 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n");
7041 1.119 msaitoh error = ENXIO;
7042 1.119 msaitoh goto err_out;
7043 1.98 msaitoh }
7044 1.98 msaitoh /* Round-robin affinity */
7045 1.98 msaitoh kcpuset_zero(affinity);
7046 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
7047 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
7048 1.119 msaitoh NULL);
7049 1.44 msaitoh
7050 1.98 msaitoh aprint_normal_dev(dev,
7051 1.98 msaitoh "for link, interrupting at %s", intrstr);
7052 1.98 msaitoh if (error == 0)
7053 1.98 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
7054 1.44 msaitoh else
7055 1.98 msaitoh aprint_normal("\n");
7056 1.44 msaitoh
7057 1.98 msaitoh kcpuset_destroy(affinity);
7058 1.119 msaitoh aprint_normal_dev(dev,
7059 1.119 msaitoh "Using MSI-X interrupts with %d vectors\n", vector + 1);
7060 1.99 msaitoh
7061 1.44 msaitoh return (0);
7062 1.119 msaitoh
7063 1.119 msaitoh err_out:
7064 1.119 msaitoh kcpuset_destroy(affinity);
7065 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
7066 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
7067 1.119 msaitoh return (error);
7068 1.99 msaitoh } /* ixgbe_allocate_msix */
7069 1.44 msaitoh
7070 1.99 msaitoh /************************************************************************
7071 1.99 msaitoh * ixgbe_configure_interrupts
7072 1.99 msaitoh *
7073 1.99 msaitoh * Setup MSI-X, MSI, or legacy interrupts (in that order).
7074 1.99 msaitoh * This will also depend on user settings.
7075 1.99 msaitoh ************************************************************************/
7076 1.44 msaitoh static int
7077 1.333 msaitoh ixgbe_configure_interrupts(struct ixgbe_softc *sc)
7078 1.44 msaitoh {
7079 1.333 msaitoh device_t dev = sc->dev;
7080 1.333 msaitoh struct ixgbe_mac_info *mac = &sc->hw.mac;
7081 1.98 msaitoh int want, queues, msgs;
7082 1.44 msaitoh
7083 1.99 msaitoh /* Default to 1 queue if MSI-X setup fails */
7084 1.333 msaitoh sc->num_queues = 1;
7085 1.99 msaitoh
7086 1.98 msaitoh /* Override by tuneable */
7087 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX))
7088 1.98 msaitoh goto msi;
7089 1.44 msaitoh
7090 1.118 msaitoh /*
7091 1.118 msaitoh 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
7092 1.118 msaitoh 	 * to save an interrupt slot.
7093 1.118 msaitoh */
7094 1.118 msaitoh if (ncpu == 1)
7095 1.118 msaitoh goto msi;
7096 1.185 msaitoh
7097 1.99 msaitoh /* First try MSI-X */
7098 1.333 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
7099 1.98 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR);
7100 1.98 msaitoh if (msgs < 2)
7101 1.98 msaitoh goto msi;
7102 1.44 msaitoh
7103 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7104 1.44 msaitoh
7105 1.98 msaitoh 	/* Figure out a reasonable auto config value (one queue per CPU) */
7106 1.98 msaitoh queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
7107 1.44 msaitoh
7108 1.98 msaitoh #ifdef RSS
7109 1.98 msaitoh /* If we're doing RSS, clamp at the number of RSS buckets */
7110 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
7111 1.165 riastrad queues = uimin(queues, rss_getnumbuckets());
7112 1.98 msaitoh #endif
7113 1.99 msaitoh if (ixgbe_num_queues > queues) {
7114 1.333 msaitoh aprint_error_dev(sc->dev,
7115 1.319 msaitoh "ixgbe_num_queues (%d) is too large, "
7116 1.319 msaitoh "using reduced amount (%d).\n", ixgbe_num_queues, queues);
7117 1.99 msaitoh ixgbe_num_queues = queues;
7118 1.99 msaitoh }
7119 1.44 msaitoh
7120 1.98 msaitoh if (ixgbe_num_queues != 0)
7121 1.98 msaitoh queues = ixgbe_num_queues;
7122 1.98 msaitoh else
7123 1.165 riastrad queues = uimin(queues,
7124 1.165 riastrad uimin(mac->max_tx_queues, mac->max_rx_queues));
7125 1.44 msaitoh
7126 1.98 msaitoh /*
7127 1.99 msaitoh * Want one vector (RX/TX pair) per queue
7128 1.99 msaitoh * plus an additional for Link.
7129 1.99 msaitoh */
7130 1.98 msaitoh want = queues + 1;
7131 1.98 msaitoh if (msgs >= want)
7132 1.98 msaitoh msgs = want;
7133 1.44 msaitoh else {
7134 1.186 msaitoh aprint_error_dev(dev, "MSI-X Configuration Problem, "
7135 1.319 msaitoh "%d vectors but %d queues wanted!\n", msgs, want);
7136 1.98 msaitoh goto msi;
7137 1.44 msaitoh }
7138 1.333 msaitoh sc->num_queues = queues;
7139 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7140 1.99 msaitoh return (0);
7141 1.44 msaitoh
7142 1.98 msaitoh /*
7143 1.99 msaitoh 	 * MSI-X allocation failed or provided us with
7144 1.99 msaitoh 	 * fewer vectors than needed.  Free MSI-X resources
7145 1.99 msaitoh 	 * and we'll try enabling MSI.
7146 1.99 msaitoh */
7147 1.98 msaitoh msi:
7148 1.99 msaitoh /* Without MSI-X, some features are no longer supported */
7149 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
7150 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
7151 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7152 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
7153 1.99 msaitoh
7154 1.333 msaitoh msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag);
7155 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
7156 1.99 msaitoh if (msgs > 1)
7157 1.99 msaitoh msgs = 1;
7158 1.99 msaitoh if (msgs != 0) {
7159 1.99 msaitoh msgs = 1;
7160 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
7161 1.99 msaitoh return (0);
7162 1.99 msaitoh }
7163 1.99 msaitoh
7164 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7165 1.99 msaitoh aprint_error_dev(dev,
7166 1.99 msaitoh "Device does not support legacy interrupts.\n");
7167 1.99 msaitoh return 1;
7168 1.99 msaitoh }
7169 1.99 msaitoh
7170 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7171 1.99 msaitoh
7172 1.99 msaitoh return (0);
7173 1.99 msaitoh } /* ixgbe_configure_interrupts */
7174 1.44 msaitoh
7175 1.48 msaitoh
7176 1.99 msaitoh /************************************************************************
7177 1.99 msaitoh * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7178 1.99 msaitoh *
7179 1.99 msaitoh * Done outside of interrupt context since the driver might sleep
7180 1.99 msaitoh ************************************************************************/
7181 1.26 msaitoh static void
7182 1.98 msaitoh ixgbe_handle_link(void *context)
7183 1.26 msaitoh {
7184 1.333 msaitoh struct ixgbe_softc *sc = context;
7185 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
7186 1.26 msaitoh
7187 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
7188 1.257 msaitoh
7189 1.333 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1);
7190 1.333 msaitoh ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0);
7191 1.333 msaitoh ixgbe_update_link_status(sc);
7192 1.26 msaitoh
7193 1.98 msaitoh /* Re-enable link interrupts */
7194 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7195 1.99 msaitoh } /* ixgbe_handle_link */
7196 1.45 msaitoh
7197 1.161 kamil #if 0
7198 1.99 msaitoh /************************************************************************
7199 1.99 msaitoh * ixgbe_rearm_queues
7200 1.99 msaitoh ************************************************************************/
7201 1.160 msaitoh static __inline void
7202 1.333 msaitoh ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues)
7203 1.63 msaitoh {
7204 1.63 msaitoh u32 mask;
7205 1.63 msaitoh
7206 1.333 msaitoh switch (sc->hw.mac.type) {
7207 1.63 msaitoh case ixgbe_mac_82598EB:
7208 1.63 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7209 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
7210 1.63 msaitoh break;
7211 1.63 msaitoh case ixgbe_mac_82599EB:
7212 1.63 msaitoh case ixgbe_mac_X540:
7213 1.63 msaitoh case ixgbe_mac_X550:
7214 1.63 msaitoh case ixgbe_mac_X550EM_x:
7215 1.99 msaitoh case ixgbe_mac_X550EM_a:
7216 1.63 msaitoh mask = (queues & 0xFFFFFFFF);
7217 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
7218 1.63 msaitoh mask = (queues >> 32);
7219 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
7220 1.63 msaitoh break;
7221 1.63 msaitoh default:
7222 1.63 msaitoh break;
7223 1.63 msaitoh }
7224 1.99 msaitoh } /* ixgbe_rearm_queues */
7225 1.161 kamil #endif
7226