 1 1.352 riastrad /* $NetBSD: ixgbe.c,v 1.352 2024/06/29 12:11:12 riastradh Exp $ */
2 1.99 msaitoh
3 1.1 dyoung /******************************************************************************
4 1.1 dyoung
5 1.99 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 dyoung All rights reserved.
7 1.99 msaitoh
8 1.99 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 dyoung modification, are permitted provided that the following conditions are met:
10 1.99 msaitoh
11 1.99 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 dyoung this list of conditions and the following disclaimer.
13 1.99 msaitoh
14 1.99 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.99 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 dyoung documentation and/or other materials provided with the distribution.
17 1.99 msaitoh
18 1.99 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.99 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 dyoung this software without specific prior written permission.
21 1.99 msaitoh
22 1.1 dyoung THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.99 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.99 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.99 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.99 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.99 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.99 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.99 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.99 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 dyoung ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 dyoung POSSIBILITY OF SUCH DAMAGE.
33 1.1 dyoung
34 1.1 dyoung ******************************************************************************/
35 1.145 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 1.99 msaitoh
37 1.1 dyoung /*
38 1.1 dyoung * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 dyoung * All rights reserved.
40 1.1 dyoung *
41 1.1 dyoung * This code is derived from software contributed to The NetBSD Foundation
42 1.1 dyoung * by Coyote Point Systems, Inc.
43 1.1 dyoung *
44 1.1 dyoung * Redistribution and use in source and binary forms, with or without
45 1.1 dyoung * modification, are permitted provided that the following conditions
46 1.1 dyoung * are met:
47 1.1 dyoung * 1. Redistributions of source code must retain the above copyright
48 1.1 dyoung * notice, this list of conditions and the following disclaimer.
49 1.1 dyoung * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 dyoung * notice, this list of conditions and the following disclaimer in the
51 1.1 dyoung * documentation and/or other materials provided with the distribution.
52 1.1 dyoung *
53 1.1 dyoung * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 dyoung * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 dyoung * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 dyoung * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 dyoung * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 dyoung * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 dyoung * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 dyoung * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 dyoung * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 dyoung * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 dyoung * POSSIBILITY OF SUCH DAMAGE.
64 1.1 dyoung */
65 1.1 dyoung
66 1.281 msaitoh #include <sys/cdefs.h>
67 1.352 riastrad __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.352 2024/06/29 12:11:12 riastradh Exp $");
68 1.281 msaitoh
69 1.80 msaitoh #ifdef _KERNEL_OPT
70 1.1 dyoung #include "opt_inet.h"
71 1.22 msaitoh #include "opt_inet6.h"
72 1.80 msaitoh #include "opt_net_mpsafe.h"
73 1.80 msaitoh #endif
74 1.1 dyoung
75 1.1 dyoung #include "ixgbe.h"
76 1.251 msaitoh #include "ixgbe_phy.h"
77 1.135 msaitoh #include "ixgbe_sriov.h"
78 1.1 dyoung
79 1.33 msaitoh #include <sys/cprng.h>
80 1.95 msaitoh #include <dev/mii/mii.h>
81 1.95 msaitoh #include <dev/mii/miivar.h>
82 1.33 msaitoh
83 1.99 msaitoh /************************************************************************
84 1.99 msaitoh * Driver version
85 1.99 msaitoh ************************************************************************/
86 1.159 maxv static const char ixgbe_driver_version[] = "4.0.1-k";
87 1.301 msaitoh /* XXX NetBSD: + 3.3.24 */
88 1.1 dyoung
89 1.99 msaitoh /************************************************************************
90 1.99 msaitoh * PCI Device ID Table
91 1.1 dyoung *
92 1.99 msaitoh * Used by probe to select devices to load on
93 1.99 msaitoh * Last field stores an index into ixgbe_strings
94 1.99 msaitoh * Last entry must be all 0s
95 1.1 dyoung *
96 1.99 msaitoh * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 1.99 msaitoh ************************************************************************/
98 1.159 maxv static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
99 1.1 dyoung {
100 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
101 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
102 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
103 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
104 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
105 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
106 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
107 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
108 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
109 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
110 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
111 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
112 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
113 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
114 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
115 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
116 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
117 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
118 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
119 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
120 1.334 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
121 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
122 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
123 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
124 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
125 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
126 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
127 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
128 1.24 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
129 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
130 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
131 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
132 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
133 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
134 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
135 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
136 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
137 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
138 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
139 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
140 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
141 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
142 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
143 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
144 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
145 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
146 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
147 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
148 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
149 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
150 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
151 1.1 dyoung /* required last entry */
152 1.1 dyoung {0, 0, 0, 0, 0}
153 1.1 dyoung };
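/*
 * Note: every entry above leaves the SubVendor ID, SubDevice ID and the
 * string index at 0, so lookup effectively matches on Vendor/Device ID
 * alone and all devices share the single ixgbe_strings[] entry.
 */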
154 1.1 dyoung
155 1.99 msaitoh /************************************************************************
156 1.99 msaitoh * Table of branding strings
157 1.99 msaitoh ************************************************************************/
158 1.1 dyoung static const char *ixgbe_strings[] = {
159 1.1 dyoung "Intel(R) PRO/10GbE PCI-Express Network Driver"
160 1.1 dyoung };
161 1.1 dyoung
162 1.99 msaitoh /************************************************************************
163 1.99 msaitoh * Function prototypes
164 1.99 msaitoh ************************************************************************/
165 1.186 msaitoh static int ixgbe_probe(device_t, cfdata_t, void *);
166 1.333 msaitoh static void ixgbe_quirks(struct ixgbe_softc *);
167 1.186 msaitoh static void ixgbe_attach(device_t, device_t, void *);
168 1.186 msaitoh static int ixgbe_detach(device_t, int);
169 1.1 dyoung #if 0
170 1.186 msaitoh static int ixgbe_shutdown(device_t);
171 1.1 dyoung #endif
172 1.44 msaitoh static bool ixgbe_suspend(device_t, const pmf_qual_t *);
173 1.44 msaitoh static bool ixgbe_resume(device_t, const pmf_qual_t *);
174 1.98 msaitoh static int ixgbe_ifflags_cb(struct ethercom *);
175 1.186 msaitoh static int ixgbe_ioctl(struct ifnet *, u_long, void *);
176 1.1 dyoung static int ixgbe_init(struct ifnet *);
177 1.333 msaitoh static void ixgbe_init_locked(struct ixgbe_softc *);
178 1.232 msaitoh static void ixgbe_ifstop(struct ifnet *, int);
179 1.252 msaitoh static void ixgbe_stop_locked(void *);
180 1.333 msaitoh static void ixgbe_init_device_features(struct ixgbe_softc *);
181 1.333 msaitoh static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
182 1.333 msaitoh static void ixgbe_add_media_types(struct ixgbe_softc *);
183 1.186 msaitoh static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
184 1.186 msaitoh static int ixgbe_media_change(struct ifnet *);
185 1.333 msaitoh static int ixgbe_allocate_pci_resources(struct ixgbe_softc *,
186 1.1 dyoung const struct pci_attach_args *);
187 1.333 msaitoh static void ixgbe_free_deferred_handlers(struct ixgbe_softc *);
188 1.333 msaitoh static void ixgbe_get_slot_info(struct ixgbe_softc *);
189 1.333 msaitoh static int ixgbe_allocate_msix(struct ixgbe_softc *,
190 1.1 dyoung const struct pci_attach_args *);
191 1.333 msaitoh static int ixgbe_allocate_legacy(struct ixgbe_softc *,
192 1.1 dyoung const struct pci_attach_args *);
193 1.333 msaitoh static int ixgbe_configure_interrupts(struct ixgbe_softc *);
194 1.333 msaitoh static void ixgbe_free_pciintr_resources(struct ixgbe_softc *);
195 1.333 msaitoh static void ixgbe_free_pci_resources(struct ixgbe_softc *);
196 1.1 dyoung static void ixgbe_local_timer(void *);
197 1.233 msaitoh static void ixgbe_handle_timer(struct work *, void *);
198 1.186 msaitoh static void ixgbe_recovery_mode_timer(void *);
199 1.233 msaitoh static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
200 1.333 msaitoh static int ixgbe_setup_interface(device_t, struct ixgbe_softc *);
201 1.333 msaitoh static void ixgbe_config_gpie(struct ixgbe_softc *);
202 1.333 msaitoh static void ixgbe_config_dmac(struct ixgbe_softc *);
203 1.333 msaitoh static void ixgbe_config_delay_values(struct ixgbe_softc *);
204 1.333 msaitoh static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
205 1.333 msaitoh static void ixgbe_config_link(struct ixgbe_softc *);
206 1.333 msaitoh static void ixgbe_check_wol_support(struct ixgbe_softc *);
207 1.333 msaitoh static int ixgbe_setup_low_power_mode(struct ixgbe_softc *);
208 1.161 kamil #if 0
209 1.333 msaitoh static void ixgbe_rearm_queues(struct ixgbe_softc *, u64);
210 1.161 kamil #endif
211 1.1 dyoung
212 1.333 msaitoh static void ixgbe_initialize_transmit_units(struct ixgbe_softc *);
213 1.333 msaitoh static void ixgbe_initialize_receive_units(struct ixgbe_softc *);
214 1.333 msaitoh static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
215 1.333 msaitoh static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
216 1.333 msaitoh static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
217 1.333 msaitoh
218 1.333 msaitoh static void ixgbe_enable_intr(struct ixgbe_softc *);
219 1.333 msaitoh static void ixgbe_disable_intr(struct ixgbe_softc *);
220 1.333 msaitoh static void ixgbe_update_stats_counters(struct ixgbe_softc *);
221 1.333 msaitoh static void ixgbe_set_rxfilter(struct ixgbe_softc *);
222 1.333 msaitoh static void ixgbe_update_link_status(struct ixgbe_softc *);
223 1.333 msaitoh static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
224 1.333 msaitoh static void ixgbe_configure_ivars(struct ixgbe_softc *);
225 1.1 dyoung static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
226 1.333 msaitoh static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
227 1.1 dyoung
228 1.333 msaitoh static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
229 1.333 msaitoh static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
230 1.193 msaitoh static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
231 1.333 msaitoh static int ixgbe_register_vlan(struct ixgbe_softc *, u16);
232 1.333 msaitoh static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16);
233 1.1 dyoung
234 1.333 msaitoh static void ixgbe_add_device_sysctls(struct ixgbe_softc *);
235 1.333 msaitoh static void ixgbe_add_hw_stats(struct ixgbe_softc *);
236 1.333 msaitoh static void ixgbe_clear_evcnt(struct ixgbe_softc *);
237 1.333 msaitoh static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238 1.333 msaitoh static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239 1.333 msaitoh static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 1.44 msaitoh
241 1.44 msaitoh /* Sysctl handlers */
242 1.52 msaitoh static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243 1.52 msaitoh static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244 1.186 msaitoh static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245 1.44 msaitoh static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246 1.44 msaitoh static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247 1.44 msaitoh static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248 1.48 msaitoh #ifdef IXGBE_DEBUG
249 1.48 msaitoh static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250 1.48 msaitoh static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251 1.48 msaitoh #endif
252 1.186 msaitoh static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253 1.287 msaitoh static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
254 1.186 msaitoh static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
255 1.186 msaitoh static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
256 1.186 msaitoh static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
257 1.186 msaitoh static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
258 1.186 msaitoh static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
259 1.158 msaitoh static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
260 1.286 msaitoh static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
261 1.313 msaitoh static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
262 1.313 msaitoh static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
263 1.44 msaitoh static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
264 1.44 msaitoh static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
265 1.1 dyoung
266 1.277 msaitoh /* Interrupt functions */
267 1.34 msaitoh static int ixgbe_msix_que(void *);
268 1.233 msaitoh static int ixgbe_msix_admin(void *);
269 1.333 msaitoh static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
270 1.277 msaitoh static int ixgbe_legacy_irq(void *);
271 1.1 dyoung
272 1.233 msaitoh /* Event handlers running on workqueue */
273 1.1 dyoung static void ixgbe_handle_que(void *);
274 1.1 dyoung static void ixgbe_handle_link(void *);
275 1.233 msaitoh static void ixgbe_handle_msf(void *);
276 1.273 msaitoh static void ixgbe_handle_mod(void *, bool);
277 1.44 msaitoh static void ixgbe_handle_phy(void *);
278 1.1 dyoung
279 1.233 msaitoh /* Deferred workqueue handlers */
280 1.233 msaitoh static void ixgbe_handle_admin(struct work *, void *);
281 1.128 knakahar static void ixgbe_handle_que_work(struct work *, void *);
282 1.128 knakahar
283 1.159 maxv static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
284 1.1 dyoung
285 1.99 msaitoh /************************************************************************
286 1.99 msaitoh * NetBSD Device Interface Entry Points
287 1.99 msaitoh ************************************************************************/
288 1.333 msaitoh CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
289 1.1 dyoung ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
290 1.1 dyoung DVF_DETACH_SHUTDOWN);
291 1.1 dyoung
292 1.1 dyoung #if 0
293 1.44 msaitoh devclass_t ix_devclass;
294 1.44 msaitoh DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
295 1.1 dyoung
296 1.44 msaitoh MODULE_DEPEND(ix, pci, 1, 1, 1);
297 1.44 msaitoh MODULE_DEPEND(ix, ether, 1, 1, 1);
298 1.115 msaitoh #ifdef DEV_NETMAP
299 1.115 msaitoh MODULE_DEPEND(ix, netmap, 1, 1, 1);
300 1.115 msaitoh #endif
301 1.1 dyoung #endif
302 1.1 dyoung
303 1.1 dyoung /*
304 1.99 msaitoh * TUNEABLE PARAMETERS:
305 1.99 msaitoh */
306 1.1 dyoung
307 1.1 dyoung /*
 308 1.99 msaitoh * AIM: Adaptive Interrupt Moderation,
 309 1.99 msaitoh * which means that the interrupt rate
 310 1.99 msaitoh * is varied over time based on the
 311 1.99 msaitoh * traffic for that interrupt vector.
312 1.99 msaitoh */
313 1.73 msaitoh static bool ixgbe_enable_aim = true;
314 1.52 msaitoh #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
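/*
 * NetBSD: SYSCTL_INT() is defined away above, so the FreeBSD-style sysctl
 * declarations that follow are no-ops kept for reference; the tunables
 * themselves are wired up through the NetBSD sysctl code elsewhere in this
 * driver.
 */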
315 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
316 1.52 msaitoh "Enable adaptive interrupt moderation");
317 1.1 dyoung
318 1.22 msaitoh static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
319 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
320 1.52 msaitoh &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
321 1.1 dyoung
322 1.1 dyoung /* How many packets rxeof tries to clean at a time */
323 1.1 dyoung static int ixgbe_rx_process_limit = 256;
324 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
325 1.99 msaitoh &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
326 1.1 dyoung
327 1.28 msaitoh /* How many packets txeof tries to clean at a time */
328 1.28 msaitoh static int ixgbe_tx_process_limit = 256;
329 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
330 1.52 msaitoh &ixgbe_tx_process_limit, 0,
331 1.99 msaitoh "Maximum number of sent packets to process at a time, -1 means unlimited");
332 1.52 msaitoh
333 1.52 msaitoh /* Flow control setting, default to full */
334 1.52 msaitoh static int ixgbe_flow_control = ixgbe_fc_full;
335 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
336 1.52 msaitoh &ixgbe_flow_control, 0, "Default flow control used for all adapters");
337 1.52 msaitoh
 338 1.179 msaitoh /* Whether packet processing uses a workqueue or softint */
339 1.128 knakahar static bool ixgbe_txrx_workqueue = false;
340 1.128 knakahar
341 1.1 dyoung /*
 342 1.99 msaitoh * Smart speed setting, default to on.
 343 1.99 msaitoh * This only works as a compile option
 344 1.99 msaitoh * right now as it's set during attach;
 345 1.99 msaitoh * set this to 'ixgbe_smart_speed_off'
 346 1.99 msaitoh * to disable.
347 1.99 msaitoh */
348 1.1 dyoung static int ixgbe_smart_speed = ixgbe_smart_speed_on;
349 1.1 dyoung
350 1.1 dyoung /*
351 1.99 msaitoh * MSI-X should be the default for best performance,
352 1.1 dyoung * but this allows it to be forced off for testing.
353 1.1 dyoung */
354 1.1 dyoung static int ixgbe_enable_msix = 1;
355 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
356 1.52 msaitoh "Enable MSI-X interrupts");
357 1.1 dyoung
358 1.1 dyoung /*
 359 1.1 dyoung * Number of queues. If set to 0, it is
 360 1.1 dyoung * autoconfigured based on the number of
 361 1.350 msaitoh * CPUs and the number of MSI-X vectors.
 362 1.350 msaitoh * This can be overridden manually here.
363 1.1 dyoung */
364 1.62 msaitoh static int ixgbe_num_queues = 0;
365 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
366 1.52 msaitoh "Number of queues to configure, 0 indicates autoconfigure");
367 1.1 dyoung
368 1.1 dyoung /*
369 1.99 msaitoh * Number of TX descriptors per ring,
 370 1.99 msaitoh * set higher than RX as this seems to be
 371 1.99 msaitoh * the better performing choice.
372 1.99 msaitoh */
373 1.335 msaitoh static int ixgbe_txd = DEFAULT_TXD;
374 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
375 1.52 msaitoh "Number of transmit descriptors per queue");
376 1.1 dyoung
377 1.1 dyoung /* Number of RX descriptors per ring */
378 1.335 msaitoh static int ixgbe_rxd = DEFAULT_RXD;
379 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
380 1.52 msaitoh "Number of receive descriptors per queue");
381 1.33 msaitoh
382 1.33 msaitoh /*
 383 1.99 msaitoh * Setting this on allows the use
 384 1.99 msaitoh * of unsupported SFP+ modules; note that
 385 1.99 msaitoh * in doing so you are on your own :)
386 1.99 msaitoh */
387 1.35 msaitoh static int allow_unsupported_sfp = false;
388 1.52 msaitoh #define TUNABLE_INT(__x, __y)
389 1.52 msaitoh TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
390 1.1 dyoung
391 1.99 msaitoh /*
392 1.99 msaitoh * Not sure if Flow Director is fully baked,
393 1.99 msaitoh * so we'll default to turning it off.
394 1.99 msaitoh */
395 1.99 msaitoh static int ixgbe_enable_fdir = 0;
396 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
397 1.99 msaitoh "Enable Flow Director");
398 1.99 msaitoh
399 1.99 msaitoh /* Legacy Transmit (single queue) */
400 1.99 msaitoh static int ixgbe_enable_legacy_tx = 0;
401 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
402 1.99 msaitoh &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
403 1.99 msaitoh
404 1.99 msaitoh /* Receive-Side Scaling */
405 1.99 msaitoh static int ixgbe_enable_rss = 1;
406 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
407 1.99 msaitoh "Enable Receive-Side Scaling (RSS)");
408 1.99 msaitoh
409 1.99 msaitoh #if 0
410 1.99 msaitoh static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
411 1.99 msaitoh static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
412 1.1 dyoung #endif
413 1.1 dyoung
414 1.80 msaitoh #ifdef NET_MPSAFE
415 1.80 msaitoh #define IXGBE_MPSAFE 1
416 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
417 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
418 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
419 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
420 1.80 msaitoh #else
421 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS 0
422 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS 0
423 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
424 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS 0
425 1.80 msaitoh #endif
426 1.128 knakahar #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
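/*
 * With NET_MPSAFE defined, the callout, softint and workqueue flags above
 * request MP-safe handlers, i.e. they run without holding the kernel lock.
 */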
427 1.80 msaitoh
428 1.312 msaitoh /* Interval between reports of errors */
429 1.312 msaitoh static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */
430 1.312 msaitoh
431 1.99 msaitoh /************************************************************************
432 1.99 msaitoh * ixgbe_initialize_rss_mapping
433 1.99 msaitoh ************************************************************************/
434 1.98 msaitoh static void
435 1.333 msaitoh ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
436 1.1 dyoung {
437 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
438 1.186 msaitoh u32 reta = 0, mrqc, rss_key[10];
439 1.186 msaitoh int queue_id, table_size, index_mult;
440 1.186 msaitoh int i, j;
441 1.186 msaitoh u32 rss_hash_config;
442 1.99 msaitoh
 443 1.122 knakahar /* Force the use of the default RSS key. */
444 1.122 knakahar #ifdef __NetBSD__
445 1.122 knakahar rss_getkey((uint8_t *) &rss_key);
446 1.122 knakahar #else
447 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
448 1.99 msaitoh /* Fetch the configured RSS key */
449 1.99 msaitoh rss_getkey((uint8_t *) &rss_key);
450 1.99 msaitoh } else {
451 1.99 msaitoh /* set up random bits */
452 1.99 msaitoh cprng_fast(&rss_key, sizeof(rss_key));
453 1.99 msaitoh }
454 1.122 knakahar #endif
455 1.1 dyoung
456 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
457 1.98 msaitoh index_mult = 0x1;
458 1.98 msaitoh table_size = 128;
459 1.333 msaitoh switch (sc->hw.mac.type) {
460 1.98 msaitoh case ixgbe_mac_82598EB:
461 1.98 msaitoh index_mult = 0x11;
462 1.98 msaitoh break;
463 1.98 msaitoh case ixgbe_mac_X550:
464 1.98 msaitoh case ixgbe_mac_X550EM_x:
465 1.99 msaitoh case ixgbe_mac_X550EM_a:
466 1.98 msaitoh table_size = 512;
467 1.98 msaitoh break;
468 1.98 msaitoh default:
469 1.98 msaitoh break;
470 1.98 msaitoh }
471 1.1 dyoung
472 1.98 msaitoh /* Set up the redirection table */
473 1.99 msaitoh for (i = 0, j = 0; i < table_size; i++, j++) {
474 1.333 msaitoh if (j == sc->num_queues)
475 1.99 msaitoh j = 0;
476 1.99 msaitoh
477 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
478 1.99 msaitoh /*
479 1.99 msaitoh * Fetch the RSS bucket id for the given indirection
480 1.99 msaitoh * entry. Cap it at the number of configured buckets
481 1.99 msaitoh * (which is num_queues.)
482 1.99 msaitoh */
483 1.99 msaitoh queue_id = rss_get_indirection_to_bucket(i);
484 1.333 msaitoh queue_id = queue_id % sc->num_queues;
485 1.99 msaitoh } else
486 1.99 msaitoh queue_id = (j * index_mult);
487 1.99 msaitoh
488 1.98 msaitoh /*
489 1.98 msaitoh * The low 8 bits are for hash value (n+0);
490 1.98 msaitoh * The next 8 bits are for hash value (n+1), etc.
491 1.98 msaitoh */
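		/*
		 * Each 32-bit RETA/ERETA register thus holds four 8-bit
		 * queue indices; the register is written out every fourth
		 * iteration below.
		 */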
492 1.98 msaitoh reta = reta >> 8;
493 1.98 msaitoh reta = reta | (((uint32_t) queue_id) << 24);
494 1.98 msaitoh if ((i & 3) == 3) {
495 1.98 msaitoh if (i < 128)
496 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
497 1.98 msaitoh else
498 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
499 1.99 msaitoh reta);
500 1.98 msaitoh reta = 0;
501 1.98 msaitoh }
502 1.98 msaitoh }
503 1.1 dyoung
504 1.98 msaitoh /* Now fill our hash function seeds */
505 1.99 msaitoh for (i = 0; i < 10; i++)
506 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
507 1.1 dyoung
508 1.98 msaitoh /* Perform hash on these packet types */
509 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
510 1.99 msaitoh rss_hash_config = rss_gethashconfig();
511 1.99 msaitoh else {
512 1.99 msaitoh /*
513 1.99 msaitoh * Disable UDP - IP fragments aren't currently being handled
514 1.99 msaitoh * and so we end up with a mix of 2-tuple and 4-tuple
515 1.99 msaitoh * traffic.
516 1.99 msaitoh */
517 1.99 msaitoh rss_hash_config = RSS_HASHTYPE_RSS_IPV4
518 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV4
519 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6
520 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6
521 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6_EX
522 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
523 1.99 msaitoh }
524 1.99 msaitoh
525 1.98 msaitoh mrqc = IXGBE_MRQC_RSSEN;
526 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
527 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
528 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
529 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
530 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
531 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
532 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
533 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
534 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
535 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
536 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
537 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
538 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
539 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
540 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
541 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
542 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
543 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
544 1.333 msaitoh mrqc |= ixgbe_get_mrqc(sc->iov_mode);
545 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
546 1.99 msaitoh } /* ixgbe_initialize_rss_mapping */
547 1.1 dyoung
548 1.99 msaitoh /************************************************************************
549 1.99 msaitoh * ixgbe_initialize_receive_units - Setup receive registers and features.
550 1.99 msaitoh ************************************************************************/
551 1.98 msaitoh #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
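/*
 * BSIZEPKT_ROUNDUP rounds the receive buffer size up to the next SRRCTL
 * BSIZEPKT unit (1 KB granularity) before it is shifted into the SRRCTL
 * register field below.
 */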
552 1.185 msaitoh
553 1.1 dyoung static void
554 1.333 msaitoh ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
555 1.1 dyoung {
556 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
557 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
558 1.333 msaitoh struct ifnet *ifp = sc->ifp;
559 1.186 msaitoh int i, j;
560 1.98 msaitoh u32 bufsz, fctrl, srrctl, rxcsum;
561 1.98 msaitoh u32 hlreg;
562 1.98 msaitoh
563 1.98 msaitoh /*
564 1.98 msaitoh * Make sure receives are disabled while
565 1.98 msaitoh * setting up the descriptor ring
566 1.98 msaitoh */
567 1.98 msaitoh ixgbe_disable_rx(hw);
568 1.1 dyoung
569 1.98 msaitoh /* Enable broadcasts */
570 1.98 msaitoh fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
571 1.98 msaitoh fctrl |= IXGBE_FCTRL_BAM;
572 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB) {
573 1.98 msaitoh fctrl |= IXGBE_FCTRL_DPF;
574 1.98 msaitoh fctrl |= IXGBE_FCTRL_PMCF;
575 1.98 msaitoh }
576 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
577 1.1 dyoung
578 1.98 msaitoh /* Set for Jumbo Frames? */
579 1.98 msaitoh hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
580 1.98 msaitoh if (ifp->if_mtu > ETHERMTU)
581 1.98 msaitoh hlreg |= IXGBE_HLREG0_JUMBOEN;
582 1.98 msaitoh else
583 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_JUMBOEN;
584 1.99 msaitoh
585 1.98 msaitoh #ifdef DEV_NETMAP
586 1.99 msaitoh /* CRC stripping is conditional in Netmap */
587 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
588 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP) &&
589 1.99 msaitoh !ix_crcstrip)
590 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
591 1.60 msaitoh else
592 1.99 msaitoh #endif /* DEV_NETMAP */
593 1.98 msaitoh hlreg |= IXGBE_HLREG0_RXCRCSTRP;
594 1.99 msaitoh
595 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
596 1.1 dyoung
597 1.333 msaitoh bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
598 1.99 msaitoh IXGBE_SRRCTL_BSIZEPKT_SHIFT;
599 1.1 dyoung
600 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++) {
601 1.98 msaitoh u64 rdba = rxr->rxdma.dma_paddr;
602 1.152 msaitoh u32 reg;
603 1.98 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
 604 1.98 msaitoh int regshift = i % 4; /* 8 bits per 1 queue */
605 1.99 msaitoh j = rxr->me;
606 1.1 dyoung
607 1.98 msaitoh /* Setup the Base and Length of the Rx Descriptor Ring */
608 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
609 1.98 msaitoh (rdba & 0x00000000ffffffffULL));
610 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
611 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
612 1.333 msaitoh sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
613 1.1 dyoung
614 1.98 msaitoh /* Set up the SRRCTL register */
615 1.98 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
616 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
617 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
618 1.98 msaitoh srrctl |= bufsz;
619 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
620 1.47 msaitoh
621 1.98 msaitoh /* Set RQSMR (Receive Queue Statistic Mapping) register */
622 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
623 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
624 1.98 msaitoh reg |= i << (regshift * 8);
625 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
626 1.98 msaitoh
627 1.98 msaitoh /*
628 1.98 msaitoh * Set DROP_EN iff we have no flow control and >1 queue.
629 1.98 msaitoh * Note that srrctl was cleared shortly before during reset,
630 1.98 msaitoh * so we do not need to clear the bit, but do it just in case
631 1.98 msaitoh * this code is moved elsewhere.
632 1.98 msaitoh */
633 1.333 msaitoh if ((sc->num_queues > 1) &&
634 1.333 msaitoh (sc->hw.fc.requested_mode == ixgbe_fc_none))
635 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
636 1.319 msaitoh else
637 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
638 1.98 msaitoh
639 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
640 1.98 msaitoh
641 1.98 msaitoh /* Setup the HW Rx Head and Tail Descriptor Pointers */
642 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
643 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
644 1.98 msaitoh
645 1.98 msaitoh /* Set the driver rx tail address */
646 1.98 msaitoh rxr->tail = IXGBE_RDT(rxr->me);
647 1.98 msaitoh }
648 1.98 msaitoh
649 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
650 1.99 msaitoh u32 psrtype = IXGBE_PSRTYPE_TCPHDR
651 1.186 msaitoh | IXGBE_PSRTYPE_UDPHDR
652 1.186 msaitoh | IXGBE_PSRTYPE_IPV4HDR
653 1.186 msaitoh | IXGBE_PSRTYPE_IPV6HDR;
654 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
655 1.98 msaitoh }
656 1.98 msaitoh
657 1.98 msaitoh rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
658 1.98 msaitoh
659 1.333 msaitoh ixgbe_initialize_rss_mapping(sc);
660 1.98 msaitoh
661 1.333 msaitoh if (sc->num_queues > 1) {
662 1.98 msaitoh /* RSS and RX IPP Checksum are mutually exclusive */
663 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
664 1.98 msaitoh }
665 1.98 msaitoh
666 1.98 msaitoh if (ifp->if_capenable & IFCAP_RXCSUM)
667 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
668 1.98 msaitoh
669 1.98 msaitoh /* This is useful for calculating UDP/IP fragment checksums */
670 1.98 msaitoh if (!(rxcsum & IXGBE_RXCSUM_PCSD))
671 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_IPPCSE;
672 1.98 msaitoh
673 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
674 1.98 msaitoh
675 1.99 msaitoh } /* ixgbe_initialize_receive_units */
676 1.98 msaitoh
677 1.99 msaitoh /************************************************************************
678 1.99 msaitoh * ixgbe_initialize_transmit_units - Enable transmit units.
679 1.99 msaitoh ************************************************************************/
680 1.98 msaitoh static void
681 1.333 msaitoh ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
682 1.98 msaitoh {
683 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
684 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
685 1.144 msaitoh int i;
686 1.98 msaitoh
687 1.225 msaitoh INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
688 1.225 msaitoh
689 1.98 msaitoh /* Setup the Base and Length of the Tx Descriptor Ring */
690 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, txr++) {
691 1.99 msaitoh u64 tdba = txr->txdma.dma_paddr;
692 1.99 msaitoh u32 txctrl = 0;
693 1.152 msaitoh u32 tqsmreg, reg;
694 1.152 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
 695 1.152 msaitoh int regshift = i % 4; /* 8 bits per 1 queue */
696 1.99 msaitoh int j = txr->me;
697 1.98 msaitoh
698 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
699 1.98 msaitoh (tdba & 0x00000000ffffffffULL));
700 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
701 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
702 1.333 msaitoh sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
703 1.98 msaitoh
704 1.152 msaitoh /*
705 1.152 msaitoh * Set TQSMR (Transmit Queue Statistic Mapping) register.
706 1.152 msaitoh * Register location is different between 82598 and others.
707 1.152 msaitoh */
708 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
709 1.152 msaitoh tqsmreg = IXGBE_TQSMR(regnum);
710 1.152 msaitoh else
711 1.152 msaitoh tqsmreg = IXGBE_TQSM(regnum);
712 1.152 msaitoh reg = IXGBE_READ_REG(hw, tqsmreg);
713 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
714 1.152 msaitoh reg |= i << (regshift * 8);
715 1.152 msaitoh IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 1.152 msaitoh
717 1.98 msaitoh /* Setup the HW Tx Head and Tail descriptor pointers */
718 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 1.98 msaitoh
721 1.98 msaitoh /* Cache the tail address */
722 1.98 msaitoh txr->tail = IXGBE_TDT(j);
723 1.98 msaitoh
724 1.155 msaitoh txr->txr_no_space = false;
725 1.155 msaitoh
 726 1.345 msaitoh /* Disable relaxed ordering */
727 1.98 msaitoh /*
728 1.98 msaitoh * Note: for X550 series devices, these registers are actually
729 1.295 andvar * prefixed with TPH_ instead of DCA_, but the addresses and
730 1.98 msaitoh * fields remain the same.
731 1.98 msaitoh */
732 1.98 msaitoh switch (hw->mac.type) {
733 1.98 msaitoh case ixgbe_mac_82598EB:
734 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 1.98 msaitoh break;
736 1.98 msaitoh default:
737 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 1.98 msaitoh break;
739 1.98 msaitoh }
740 1.98 msaitoh txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
741 1.98 msaitoh switch (hw->mac.type) {
742 1.98 msaitoh case ixgbe_mac_82598EB:
743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
744 1.98 msaitoh break;
745 1.98 msaitoh default:
746 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
747 1.98 msaitoh break;
748 1.98 msaitoh }
749 1.98 msaitoh
750 1.98 msaitoh }
751 1.98 msaitoh
752 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
753 1.98 msaitoh u32 dmatxctl, rttdcs;
754 1.99 msaitoh
755 1.98 msaitoh dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
756 1.98 msaitoh dmatxctl |= IXGBE_DMATXCTL_TE;
757 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
758 1.98 msaitoh /* Disable arbiter to set MTQC */
759 1.98 msaitoh rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
760 1.98 msaitoh rttdcs |= IXGBE_RTTDCS_ARBDIS;
761 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC,
763 1.333 msaitoh ixgbe_get_mtqc(sc->iov_mode));
764 1.98 msaitoh rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
765 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
766 1.98 msaitoh }
767 1.99 msaitoh } /* ixgbe_initialize_transmit_units */
768 1.98 msaitoh
769 1.245 msaitoh static void
770 1.333 msaitoh ixgbe_quirks(struct ixgbe_softc *sc)
771 1.245 msaitoh {
772 1.333 msaitoh device_t dev = sc->dev;
773 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
774 1.245 msaitoh const char *vendor, *product;
775 1.245 msaitoh
776 1.248 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
777 1.248 msaitoh /*
778 1.248 msaitoh * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
779 1.248 msaitoh * MA10-ST0.
780 1.248 msaitoh */
781 1.248 msaitoh vendor = pmf_get_platform("system-vendor");
782 1.248 msaitoh product = pmf_get_platform("system-product");
783 1.245 msaitoh
784 1.248 msaitoh if ((vendor == NULL) || (product == NULL))
785 1.248 msaitoh return;
786 1.245 msaitoh
787 1.248 msaitoh if ((strcmp(vendor, "GIGABYTE") == 0) &&
788 1.248 msaitoh (strcmp(product, "MA10-ST0") == 0)) {
789 1.248 msaitoh aprint_verbose_dev(dev,
790 1.248 msaitoh "Enable SFP+ MOD_ABS inverse quirk\n");
791 1.333 msaitoh sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
792 1.248 msaitoh }
793 1.245 msaitoh }
794 1.245 msaitoh }
795 1.245 msaitoh
796 1.99 msaitoh /************************************************************************
797 1.99 msaitoh * ixgbe_attach - Device initialization routine
798 1.98 msaitoh *
799 1.99 msaitoh * Called when the driver is being loaded.
800 1.99 msaitoh * Identifies the type of hardware, allocates all resources
801 1.99 msaitoh * and initializes the hardware.
802 1.98 msaitoh *
803 1.99 msaitoh * return 0 on success, positive on failure
804 1.99 msaitoh ************************************************************************/
805 1.98 msaitoh static void
806 1.98 msaitoh ixgbe_attach(device_t parent, device_t dev, void *aux)
807 1.98 msaitoh {
808 1.333 msaitoh struct ixgbe_softc *sc;
809 1.98 msaitoh struct ixgbe_hw *hw;
810 1.186 msaitoh int error = -1;
811 1.98 msaitoh u32 ctrl_ext;
812 1.340 msaitoh u16 high, low, nvmreg, dev_caps;
813 1.99 msaitoh pcireg_t id, subid;
814 1.159 maxv const ixgbe_vendor_info_t *ent;
815 1.98 msaitoh struct pci_attach_args *pa = aux;
816 1.219 msaitoh bool unsupported_sfp = false;
817 1.98 msaitoh const char *str;
818 1.233 msaitoh char wqname[MAXCOMLEN];
819 1.99 msaitoh char buf[256];
820 1.98 msaitoh
821 1.98 msaitoh INIT_DEBUGOUT("ixgbe_attach: begin");
822 1.98 msaitoh
823 1.98 msaitoh /* Allocate, clear, and link in our adapter structure */
824 1.333 msaitoh sc = device_private(dev);
825 1.333 msaitoh sc->hw.back = sc;
826 1.333 msaitoh sc->dev = dev;
827 1.333 msaitoh hw = &sc->hw;
828 1.333 msaitoh sc->osdep.pc = pa->pa_pc;
829 1.333 msaitoh sc->osdep.tag = pa->pa_tag;
830 1.98 msaitoh if (pci_dma64_available(pa))
831 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat64;
832 1.98 msaitoh else
833 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat;
834 1.333 msaitoh sc->osdep.attached = false;
835 1.333 msaitoh sc->osdep.detaching = false;
836 1.98 msaitoh
837 1.98 msaitoh ent = ixgbe_lookup(pa);
838 1.98 msaitoh
839 1.98 msaitoh KASSERT(ent != NULL);
840 1.98 msaitoh
841 1.98 msaitoh aprint_normal(": %s, Version - %s\n",
842 1.98 msaitoh ixgbe_strings[ent->index], ixgbe_driver_version);
843 1.98 msaitoh
844 1.233 msaitoh /* Core Lock Init */
845 1.333 msaitoh IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
846 1.1 dyoung
847 1.233 msaitoh /* Set up the timer callout and workqueue */
848 1.333 msaitoh callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
849 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
850 1.333 msaitoh error = workqueue_create(&sc->timer_wq, wqname,
851 1.333 msaitoh ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
852 1.233 msaitoh IXGBE_TASKLET_WQ_FLAGS);
853 1.233 msaitoh if (error) {
854 1.233 msaitoh aprint_error_dev(dev,
855 1.233 msaitoh "could not create timer workqueue (%d)\n", error);
856 1.233 msaitoh goto err_out;
857 1.233 msaitoh }
858 1.1 dyoung
859 1.1 dyoung /* Determine hardware revision */
860 1.99 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
861 1.99 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
862 1.99 msaitoh
863 1.99 msaitoh hw->vendor_id = PCI_VENDOR(id);
864 1.99 msaitoh hw->device_id = PCI_PRODUCT(id);
865 1.99 msaitoh hw->revision_id =
866 1.99 msaitoh PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
867 1.99 msaitoh hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
868 1.99 msaitoh hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
869 1.99 msaitoh
870 1.248 msaitoh /* Set quirk flags */
871 1.333 msaitoh ixgbe_quirks(sc);
872 1.248 msaitoh
873 1.99 msaitoh /*
874 1.99 msaitoh * Make sure BUSMASTER is set
875 1.99 msaitoh */
876 1.99 msaitoh ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
877 1.99 msaitoh
878 1.99 msaitoh /* Do base PCI setup - map BAR0 */
879 1.333 msaitoh if (ixgbe_allocate_pci_resources(sc, pa)) {
880 1.99 msaitoh aprint_error_dev(dev, "Allocation of PCI resources failed\n");
881 1.99 msaitoh error = ENXIO;
882 1.99 msaitoh goto err_out;
883 1.99 msaitoh }
884 1.99 msaitoh
885 1.99 msaitoh /* let hardware know driver is loaded */
886 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
887 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
888 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
889 1.99 msaitoh
890 1.99 msaitoh /*
891 1.99 msaitoh * Initialize the shared code
892 1.99 msaitoh */
893 1.144 msaitoh if (ixgbe_init_shared_code(hw) != 0) {
894 1.319 msaitoh aprint_error_dev(dev,
895 1.319 msaitoh "Unable to initialize the shared code\n");
896 1.99 msaitoh error = ENXIO;
897 1.99 msaitoh goto err_out;
898 1.99 msaitoh }
899 1.1 dyoung
900 1.79 msaitoh switch (hw->mac.type) {
901 1.79 msaitoh case ixgbe_mac_82598EB:
902 1.79 msaitoh str = "82598EB";
903 1.79 msaitoh break;
904 1.79 msaitoh case ixgbe_mac_82599EB:
905 1.79 msaitoh str = "82599EB";
906 1.79 msaitoh break;
907 1.79 msaitoh case ixgbe_mac_X540:
908 1.79 msaitoh str = "X540";
909 1.79 msaitoh break;
910 1.79 msaitoh case ixgbe_mac_X550:
911 1.79 msaitoh str = "X550";
912 1.79 msaitoh break;
913 1.79 msaitoh case ixgbe_mac_X550EM_x:
914 1.246 msaitoh str = "X550EM X";
915 1.79 msaitoh break;
916 1.99 msaitoh case ixgbe_mac_X550EM_a:
917 1.99 msaitoh str = "X550EM A";
918 1.99 msaitoh break;
919 1.79 msaitoh default:
920 1.79 msaitoh str = "Unknown";
921 1.79 msaitoh break;
922 1.79 msaitoh }
923 1.79 msaitoh aprint_normal_dev(dev, "device %s\n", str);
924 1.79 msaitoh
925 1.99 msaitoh hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 1.99 msaitoh
927 1.99 msaitoh /* Pick up the 82599 settings */
928 1.292 msaitoh if (hw->mac.type != ixgbe_mac_82598EB)
929 1.99 msaitoh hw->phy.smart_speed = ixgbe_smart_speed;
930 1.292 msaitoh
931 1.292 msaitoh /* Set the right number of segments */
932 1.292 msaitoh KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
933 1.333 msaitoh sc->num_segs = IXGBE_SCATTER_DEFAULT;
934 1.99 msaitoh
935 1.172 msaitoh /* Ensure SW/FW semaphore is free */
936 1.172 msaitoh ixgbe_init_swfw_semaphore(hw);
937 1.172 msaitoh
938 1.113 msaitoh hw->mac.ops.set_lan_id(hw);
939 1.333 msaitoh ixgbe_init_device_features(sc);
940 1.99 msaitoh
941 1.333 msaitoh if (ixgbe_configure_interrupts(sc)) {
942 1.1 dyoung error = ENXIO;
943 1.1 dyoung goto err_out;
944 1.1 dyoung }
945 1.1 dyoung
946 1.99 msaitoh /* Allocate multicast array memory. */
947 1.333 msaitoh sc->mta = malloc(sizeof(*sc->mta) *
948 1.215 chs MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949 1.99 msaitoh
950 1.99 msaitoh /* Enable WoL (if supported) */
951 1.333 msaitoh ixgbe_check_wol_support(sc);
952 1.99 msaitoh
953 1.193 msaitoh /* Register for VLAN events */
954 1.333 msaitoh ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);
955 1.193 msaitoh
956 1.99 msaitoh /* Verify adapter fan is still functional (if applicable) */
957 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 1.99 msaitoh u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 1.333 msaitoh ixgbe_check_fan_failure(sc, esdp, FALSE);
960 1.99 msaitoh }
961 1.99 msaitoh
962 1.99 msaitoh /* Set an initial default flow control value */
963 1.99 msaitoh hw->fc.requested_mode = ixgbe_flow_control;
964 1.99 msaitoh
965 1.1 dyoung /* Do descriptor calc and sanity checks */
966 1.1 dyoung if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 1.1 dyoung ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 1.336 msaitoh aprint_error_dev(dev, "Invalid TX ring size (%d). "
969 1.336 msaitoh "It must be between %d and %d, "
970 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
971 1.336 msaitoh "Using default value of %d instead.\n",
972 1.336 msaitoh ixgbe_txd, MIN_TXD, MAX_TXD,
973 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
974 1.336 msaitoh DEFAULT_TXD);
975 1.333 msaitoh sc->num_tx_desc = DEFAULT_TXD;
976 1.1 dyoung } else
977 1.333 msaitoh sc->num_tx_desc = ixgbe_txd;
978 1.1 dyoung
979 1.1 dyoung if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 1.33 msaitoh ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 1.336 msaitoh aprint_error_dev(dev, "Invalid RX ring size (%d). "
982 1.336 msaitoh "It must be between %d and %d, "
983 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
984 1.336 msaitoh "Using default value of %d instead.\n",
985 1.336 msaitoh ixgbe_rxd, MIN_RXD, MAX_RXD,
986 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
987 1.336 msaitoh DEFAULT_RXD);
988 1.333 msaitoh sc->num_rx_desc = DEFAULT_RXD;
989 1.1 dyoung } else
990 1.333 msaitoh sc->num_rx_desc = ixgbe_rxd;
991 1.1 dyoung
 992 1.313 msaitoh /* Limit the amount of work done per Tx/Rx processing pass */
993 1.333 msaitoh sc->rx_process_limit
994 1.333 msaitoh = (ixgbe_rx_process_limit <= sc->num_rx_desc)
995 1.333 msaitoh ? ixgbe_rx_process_limit : sc->num_rx_desc;
996 1.333 msaitoh sc->tx_process_limit
997 1.333 msaitoh = (ixgbe_tx_process_limit <= sc->num_tx_desc)
998 1.333 msaitoh ? ixgbe_tx_process_limit : sc->num_tx_desc;
999 1.313 msaitoh
 1000 1.286 msaitoh /* Set the default upper limit for copying mbufs in rxeof */
1001 1.333 msaitoh sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
1002 1.286 msaitoh
1003 1.1 dyoung /* Allocate our TX/RX Queues */
1004 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1005 1.1 dyoung error = ENOMEM;
1006 1.1 dyoung goto err_out;
1007 1.1 dyoung }
1008 1.1 dyoung
1009 1.99 msaitoh hw->phy.reset_if_overtemp = TRUE;
1010 1.99 msaitoh error = ixgbe_reset_hw(hw);
1011 1.99 msaitoh hw->phy.reset_if_overtemp = FALSE;
1012 1.237 msaitoh if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1013 1.99 msaitoh error = IXGBE_SUCCESS;
1014 1.237 msaitoh else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1015 1.324 msaitoh aprint_error_dev(dev,
1016 1.324 msaitoh "Unsupported SFP+ module type was detected.\n");
1017 1.219 msaitoh unsupported_sfp = true;
1018 1.219 msaitoh error = IXGBE_SUCCESS;
1019 1.1 dyoung } else if (error) {
1020 1.282 msaitoh aprint_error_dev(dev,
1021 1.282 msaitoh "Hardware initialization failed(error = %d)\n", error);
1022 1.1 dyoung error = EIO;
1023 1.1 dyoung goto err_late;
1024 1.1 dyoung }
1025 1.1 dyoung
1026 1.1 dyoung /* Make sure we have a good EEPROM before we read from it */
1027 1.333 msaitoh if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1028 1.48 msaitoh aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1029 1.1 dyoung error = EIO;
1030 1.1 dyoung goto err_late;
1031 1.1 dyoung }
1032 1.1 dyoung
1033 1.88 msaitoh aprint_normal("%s:", device_xname(dev));
1034 1.88 msaitoh /* NVM Image Version */
1035 1.169 msaitoh high = low = 0;
1036 1.88 msaitoh switch (hw->mac.type) {
1037 1.300 msaitoh case ixgbe_mac_82598EB:
1038 1.300 msaitoh /*
 1039 1.300 msaitoh * Print the version from the dev starter version word (0x29). The
 1040 1.300 msaitoh * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1041 1.300 msaitoh */
1042 1.300 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 1.300 msaitoh if (nvmreg == 0xffff)
1044 1.300 msaitoh break;
1045 1.300 msaitoh high = (nvmreg >> 12) & 0x0f;
1046 1.300 msaitoh low = (nvmreg >> 4) & 0xff;
1047 1.300 msaitoh id = nvmreg & 0x0f;
1048 1.300 msaitoh /*
1049 1.300 msaitoh * The following output might not be correct. Some 82598 cards
 1050 1.300 msaitoh * have 0x1070 or 0x2090. The 82598 spec update notes version 2.9.0.
1051 1.300 msaitoh */
1052 1.300 msaitoh aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1053 1.300 msaitoh break;
1054 1.88 msaitoh case ixgbe_mac_X540:
1055 1.99 msaitoh case ixgbe_mac_X550EM_a:
1056 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1057 1.88 msaitoh if (nvmreg == 0xffff)
1058 1.88 msaitoh break;
1059 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1060 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1061 1.88 msaitoh id = nvmreg & 0x0f;
1062 1.107 msaitoh aprint_normal(" NVM Image Version %u.", high);
1063 1.107 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1064 1.107 msaitoh str = "%x";
1065 1.107 msaitoh else
1066 1.107 msaitoh str = "%02x";
1067 1.107 msaitoh aprint_normal(str, low);
1068 1.107 msaitoh aprint_normal(" ID 0x%x,", id);
1069 1.88 msaitoh break;
1070 1.88 msaitoh case ixgbe_mac_X550EM_x:
1071 1.88 msaitoh case ixgbe_mac_X550:
1072 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1073 1.88 msaitoh if (nvmreg == 0xffff)
1074 1.88 msaitoh break;
1075 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1076 1.88 msaitoh low = nvmreg & 0xff;
1077 1.107 msaitoh aprint_normal(" NVM Image Version %u.%02x,", high, low);
1078 1.88 msaitoh break;
1079 1.88 msaitoh default:
1080 1.88 msaitoh break;
1081 1.88 msaitoh }
1082 1.169 msaitoh hw->eeprom.nvm_image_ver_high = high;
1083 1.169 msaitoh hw->eeprom.nvm_image_ver_low = low;
1084 1.88 msaitoh
1085 1.88 msaitoh /* PHY firmware revision */
1086 1.88 msaitoh switch (hw->mac.type) {
1087 1.88 msaitoh case ixgbe_mac_X540:
1088 1.88 msaitoh case ixgbe_mac_X550:
1089 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1090 1.88 msaitoh if (nvmreg == 0xffff)
1091 1.88 msaitoh break;
1092 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1093 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1094 1.88 msaitoh id = nvmreg & 0x000f;
1095 1.114 msaitoh aprint_normal(" PHY FW Revision %u.", high);
1096 1.114 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1097 1.114 msaitoh str = "%x";
1098 1.114 msaitoh else
1099 1.114 msaitoh str = "%02x";
1100 1.114 msaitoh aprint_normal(str, low);
1101 1.114 msaitoh aprint_normal(" ID 0x%x,", id);
1102 1.88 msaitoh break;
1103 1.88 msaitoh default:
1104 1.88 msaitoh break;
1105 1.88 msaitoh }
1106 1.88 msaitoh
1107 1.88 msaitoh /* NVM Map version & OEM NVM Image version */
1108 1.88 msaitoh switch (hw->mac.type) {
1109 1.88 msaitoh case ixgbe_mac_X550:
1110 1.88 msaitoh case ixgbe_mac_X550EM_x:
1111 1.99 msaitoh case ixgbe_mac_X550EM_a:
1112 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1113 1.88 msaitoh if (nvmreg != 0xffff) {
1114 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1115 1.88 msaitoh low = nvmreg & 0x00ff;
1116 1.88 msaitoh aprint_normal(" NVM Map version %u.%02x,", high, low);
1117 1.88 msaitoh }
1118 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1119 1.107 msaitoh if (nvmreg != 0xffff) {
1120 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1121 1.88 msaitoh low = nvmreg & 0x00ff;
1122 1.88 msaitoh aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1123 1.88 msaitoh low);
1124 1.88 msaitoh }
1125 1.88 msaitoh break;
1126 1.88 msaitoh default:
1127 1.88 msaitoh break;
1128 1.88 msaitoh }
1129 1.88 msaitoh
1130 1.88 msaitoh /* Print the ETrackID */
1131 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1132 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1133 1.88 msaitoh aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1134 1.79 msaitoh
1135 1.307 msaitoh /* Printed Board Assembly number */
1136 1.307 msaitoh error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
1137 1.307 msaitoh aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
1138 1.307 msaitoh
1139 1.351 msaitoh /* Recovery mode */
1140 1.351 msaitoh switch (sc->hw.mac.type) {
1141 1.351 msaitoh case ixgbe_mac_X550:
1142 1.351 msaitoh case ixgbe_mac_X550EM_x:
1143 1.351 msaitoh case ixgbe_mac_X550EM_a:
 1144 1.351 msaitoh /* NVM image version >= 2.00 */
1145 1.351 msaitoh if (hw->eeprom.nvm_image_ver_high >= 2) {
1146 1.351 msaitoh sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1147 1.351 msaitoh sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1148 1.351 msaitoh }
1149 1.351 msaitoh break;
1150 1.351 msaitoh default:
1151 1.351 msaitoh break;
1152 1.351 msaitoh }
1153 1.351 msaitoh
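	/*
	 * Allocate interrupt resources: try MSI-X first and, on failure,
	 * fall back to a single queue using MSI (if available) or a
	 * legacy interrupt.
	 */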
1154 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
1155 1.333 msaitoh error = ixgbe_allocate_msix(sc, pa);
1156 1.119 msaitoh if (error) {
1157 1.119 msaitoh /* Free allocated queue structures first */
1158 1.333 msaitoh ixgbe_free_queues(sc);
1159 1.119 msaitoh
1160 1.119 msaitoh 			/* Fall back to MSI or a legacy interrupt */
1161 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_MSI)
1162 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
1163 1.333 msaitoh sc->num_queues = 1;
1164 1.119 msaitoh
1165 1.119 msaitoh /* Allocate our TX/RX Queues again */
1166 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1167 1.119 msaitoh error = ENOMEM;
1168 1.119 msaitoh goto err_out;
1169 1.119 msaitoh }
1170 1.119 msaitoh }
1171 1.119 msaitoh }
1172 1.307 msaitoh
1173 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) == 0)
1174 1.333 msaitoh error = ixgbe_allocate_legacy(sc, pa);
1175 1.185 msaitoh if (error)
1176 1.99 msaitoh goto err_late;
1177 1.99 msaitoh
1178 1.119 msaitoh /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1179 1.333 msaitoh mutex_init(&(sc)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1180 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1181 1.333 msaitoh error = workqueue_create(&sc->admin_wq, wqname,
1182 1.333 msaitoh ixgbe_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
1183 1.223 thorpej IXGBE_TASKLET_WQ_FLAGS);
1184 1.223 thorpej if (error) {
1185 1.223 thorpej aprint_error_dev(dev,
1186 1.233 msaitoh "could not create admin workqueue (%d)\n", error);
1187 1.223 thorpej goto err_out;
1188 1.223 thorpej }
1189 1.119 msaitoh
1190 1.99 msaitoh error = ixgbe_start_hw(hw);
1191 1.25 msaitoh switch (error) {
1192 1.25 msaitoh case IXGBE_ERR_EEPROM_VERSION:
1193 1.319 msaitoh aprint_error_dev(dev,
1194 1.319 msaitoh "This device is a pre-production adapter/"
1195 1.1 dyoung "LOM. Please be aware there may be issues associated "
1196 1.48 msaitoh "with your hardware.\nIf you are experiencing problems "
1197 1.1 dyoung "please contact your Intel or hardware representative "
1198 1.1 dyoung "who provided you with this hardware.\n");
1199 1.25 msaitoh break;
1200 1.25 msaitoh default:
1201 1.25 msaitoh break;
1202 1.1 dyoung }
1203 1.1 dyoung
1204 1.116 msaitoh /* Setup OS specific network interface */
1205 1.333 msaitoh if (ixgbe_setup_interface(dev, sc) != 0)
1206 1.116 msaitoh goto err_late;
1207 1.116 msaitoh
1208 1.110 msaitoh /*
1209 1.110 msaitoh 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+) cage
1210 1.110 msaitoh 	 * and a module inserted, phy.id is an SFF-8024 ID, not an MII PHY ID.
1211 1.110 msaitoh */
1212 1.110 msaitoh if (hw->phy.media_type == ixgbe_media_type_copper) {
1213 1.95 msaitoh uint16_t id1, id2;
1214 1.95 msaitoh int oui, model, rev;
1215 1.285 pgoyette char descr[MII_MAX_DESCR_LEN];
1216 1.95 msaitoh
1217 1.95 msaitoh id1 = hw->phy.id >> 16;
1218 1.95 msaitoh id2 = hw->phy.id & 0xffff;
1219 1.95 msaitoh oui = MII_OUI(id1, id2);
1220 1.95 msaitoh model = MII_MODEL(id2);
1221 1.95 msaitoh rev = MII_REV(id2);
1222 1.285 pgoyette mii_get_descr(descr, sizeof(descr), oui, model);
1223 1.285 pgoyette if (descr[0])
1224 1.299 msaitoh aprint_normal_dev(dev, "PHY: %s, rev. %d\n",
1225 1.299 msaitoh descr, rev);
1226 1.95 msaitoh else
1227 1.95 msaitoh aprint_normal_dev(dev,
1228 1.95 msaitoh "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1229 1.95 msaitoh oui, model, rev);
1230 1.95 msaitoh }
1231 1.95 msaitoh
1232 1.173 msaitoh /* Enable EEE power saving */
1233 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
1234 1.173 msaitoh hw->mac.ops.setup_eee(hw,
1235 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
1236 1.173 msaitoh
1237 1.52 msaitoh /* Enable power to the phy. */
1238 1.219 msaitoh if (!unsupported_sfp) {
1239 1.219 msaitoh /* Enable the optics for 82599 SFP+ fiber */
1240 1.219 msaitoh ixgbe_enable_tx_laser(hw);
1241 1.219 msaitoh
1242 1.219 msaitoh /*
1243 1.219 msaitoh 		 * XXX Currently, ixgbe_set_phy_power() supports only copper
1244 1.219 msaitoh 		 * PHYs, so the !unsupported_sfp test is not strictly required.
1245 1.219 msaitoh */
1246 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
1247 1.219 msaitoh }
1248 1.52 msaitoh
1249 1.1 dyoung /* Initialize statistics */
1250 1.333 msaitoh ixgbe_update_stats_counters(sc);
1251 1.1 dyoung
1252 1.98 msaitoh /* Check PCIE slot type/speed/width */
1253 1.333 msaitoh ixgbe_get_slot_info(sc);
1254 1.1 dyoung
1255 1.99 msaitoh /*
1256 1.99 msaitoh * Do time init and sysctl init here, but
1257 1.99 msaitoh * only on the first port of a bypass adapter.
1258 1.99 msaitoh */
1259 1.333 msaitoh ixgbe_bypass_init(sc);
1260 1.99 msaitoh
1261 1.99 msaitoh /* Set an initial dmac value */
1262 1.333 msaitoh sc->dmac = 0;
1263 1.99 msaitoh /* Set initial advertised speeds (if applicable) */
1264 1.333 msaitoh sc->advertise = ixgbe_get_default_advertise(sc);
1265 1.45 msaitoh
1266 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1267 1.99 msaitoh ixgbe_define_iov_schemas(dev, &error);
1268 1.44 msaitoh
1269 1.44 msaitoh /* Add sysctls */
1270 1.333 msaitoh ixgbe_add_device_sysctls(sc);
1271 1.333 msaitoh ixgbe_add_hw_stats(sc);
1272 1.44 msaitoh
1273 1.99 msaitoh /* For Netmap */
1274 1.333 msaitoh sc->init_locked = ixgbe_init_locked;
1275 1.333 msaitoh sc->stop_locked = ixgbe_stop_locked;
1276 1.99 msaitoh
1277 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
1278 1.333 msaitoh ixgbe_netmap_attach(sc);
1279 1.1 dyoung
1280 1.340 msaitoh /* Print some flags */
1281 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
1282 1.99 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf);
1283 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
1284 1.99 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf);
1285 1.340 msaitoh if (ixgbe_get_device_caps(hw, &dev_caps) == 0) {
1286 1.340 msaitoh snprintb(buf, sizeof(buf), IXGBE_DEVICE_CAPS_FLAGS, dev_caps);
1287 1.340 msaitoh aprint_verbose_dev(dev, "device cap %s\n", buf);
1288 1.340 msaitoh }
1289 1.44 msaitoh
1290 1.44 msaitoh if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1291 1.333 msaitoh pmf_class_network_register(dev, sc->ifp);
1292 1.44 msaitoh else
1293 1.44 msaitoh aprint_error_dev(dev, "couldn't establish power handler\n");
1294 1.44 msaitoh
1295 1.169 msaitoh /* Init recovery mode timer and state variable */
1296 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1297 1.333 msaitoh sc->recovery_mode = 0;
1298 1.169 msaitoh
1299 1.169 msaitoh /* Set up the timer callout */
1300 1.333 msaitoh callout_init(&sc->recovery_mode_timer,
1301 1.169 msaitoh IXGBE_CALLOUT_FLAGS);
1302 1.235 msaitoh snprintf(wqname, sizeof(wqname), "%s-recovery",
1303 1.235 msaitoh device_xname(dev));
1304 1.333 msaitoh error = workqueue_create(&sc->recovery_mode_timer_wq,
1305 1.333 msaitoh wqname, ixgbe_handle_recovery_mode_timer, sc,
1306 1.233 msaitoh IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1307 1.233 msaitoh if (error) {
1308 1.233 msaitoh aprint_error_dev(dev, "could not create "
1309 1.233 msaitoh "recovery_mode_timer workqueue (%d)\n", error);
1310 1.233 msaitoh goto err_out;
1311 1.233 msaitoh }
1312 1.169 msaitoh
1313 1.169 msaitoh /* Start the task */
1314 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
1315 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
1316 1.169 msaitoh }
1317 1.169 msaitoh
1318 1.1 dyoung INIT_DEBUGOUT("ixgbe_attach: end");
1319 1.333 msaitoh sc->osdep.attached = true;
1320 1.98 msaitoh
1321 1.1 dyoung return;
1322 1.43 msaitoh
1323 1.1 dyoung err_late:
1324 1.333 msaitoh ixgbe_free_queues(sc);
1325 1.1 dyoung err_out:
1326 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1327 1.99 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1328 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1329 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
1330 1.333 msaitoh ixgbe_free_pci_resources(sc);
1331 1.333 msaitoh if (sc->mta != NULL)
1332 1.333 msaitoh free(sc->mta, M_DEVBUF);
1333 1.333 msaitoh mutex_destroy(&(sc)->admin_mtx); /* XXX appropriate order? */
1334 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
1335 1.99 msaitoh
1336 1.1 dyoung return;
1337 1.99 msaitoh } /* ixgbe_attach */
1338 1.1 dyoung
1339 1.99 msaitoh /************************************************************************
1340 1.99 msaitoh * ixgbe_check_wol_support
1341 1.99 msaitoh *
1342 1.99 msaitoh * Checks whether the adapter's ports are capable of
1343 1.99 msaitoh * Wake On LAN by reading the adapter's NVM.
1344 1.1 dyoung *
1345 1.99 msaitoh * Sets each port's hw->wol_enabled value depending
1346 1.99 msaitoh * on the value read here.
1347 1.99 msaitoh ************************************************************************/
1348 1.98 msaitoh static void
1349 1.333 msaitoh ixgbe_check_wol_support(struct ixgbe_softc *sc)
1350 1.98 msaitoh {
1351 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1352 1.186 msaitoh u16 dev_caps = 0;
1353 1.1 dyoung
1354 1.98 msaitoh /* Find out WoL support for port */
1355 1.333 msaitoh sc->wol_support = hw->wol_enabled = 0;
1356 1.98 msaitoh ixgbe_get_device_caps(hw, &dev_caps);
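	/*
	 * WoL is advertised either for both ports (PORT0_1) or for port 0
	 * only, in which case it applies just to PCI function 0.
	 */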
1357 1.98 msaitoh if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1358 1.98 msaitoh ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1359 1.99 msaitoh hw->bus.func == 0))
1360 1.333 msaitoh sc->wol_support = hw->wol_enabled = 1;
1361 1.98 msaitoh
1362 1.98 msaitoh /* Save initial wake up filter configuration */
1363 1.333 msaitoh sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1364 1.98 msaitoh
1365 1.98 msaitoh return;
1366 1.99 msaitoh } /* ixgbe_check_wol_support */
1367 1.98 msaitoh
1368 1.99 msaitoh /************************************************************************
1369 1.99 msaitoh * ixgbe_setup_interface
1370 1.98 msaitoh *
1371 1.99 msaitoh * Setup networking device structure and register an interface.
1372 1.99 msaitoh ************************************************************************/
1373 1.1 dyoung static int
1374 1.333 msaitoh ixgbe_setup_interface(device_t dev, struct ixgbe_softc *sc)
1375 1.1 dyoung {
1376 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
1377 1.98 msaitoh struct ifnet *ifp;
1378 1.1 dyoung
1379 1.98 msaitoh INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1380 1.1 dyoung
1381 1.333 msaitoh ifp = sc->ifp = &ec->ec_if;
1382 1.98 msaitoh strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1383 1.98 msaitoh ifp->if_baudrate = IF_Gbps(10);
1384 1.98 msaitoh ifp->if_init = ixgbe_init;
1385 1.98 msaitoh ifp->if_stop = ixgbe_ifstop;
1386 1.333 msaitoh ifp->if_softc = sc;
1387 1.98 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1388 1.98 msaitoh #ifdef IXGBE_MPSAFE
1389 1.112 ozaki ifp->if_extflags = IFEF_MPSAFE;
1390 1.98 msaitoh #endif
1391 1.98 msaitoh ifp->if_ioctl = ixgbe_ioctl;
1392 1.98 msaitoh #if __FreeBSD_version >= 1100045
1393 1.98 msaitoh /* TSO parameters */
1394 1.98 msaitoh ifp->if_hw_tsomax = 65518;
1395 1.98 msaitoh ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1396 1.98 msaitoh ifp->if_hw_tsomaxsegsize = 2048;
1397 1.98 msaitoh #endif
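	/*
	 * Select the transmit entry point: if_transmit (multiqueue) is
	 * used unless the legacy TX path has been enabled.
	 */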
1398 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1399 1.99 msaitoh #if 0
1400 1.99 msaitoh ixgbe_start_locked = ixgbe_legacy_start_locked;
1401 1.99 msaitoh #endif
1402 1.99 msaitoh } else {
1403 1.99 msaitoh ifp->if_transmit = ixgbe_mq_start;
1404 1.99 msaitoh #if 0
1405 1.99 msaitoh ixgbe_start_locked = ixgbe_mq_start_locked;
1406 1.29 msaitoh #endif
1407 1.99 msaitoh }
1408 1.99 msaitoh ifp->if_start = ixgbe_legacy_start;
1409 1.333 msaitoh IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
1410 1.98 msaitoh IFQ_SET_READY(&ifp->if_snd);
1411 1.98 msaitoh
1412 1.284 riastrad if_initialize(ifp);
1413 1.333 msaitoh sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
1414 1.98 msaitoh /*
1415 1.98 msaitoh 	 * We use a per-TX-queue softint, so if_deferred_start_init()
1416 1.98 msaitoh 	 * isn't used.
1417 1.98 msaitoh */
1418 1.98 msaitoh
1419 1.333 msaitoh sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1420 1.98 msaitoh
1421 1.98 msaitoh /*
1422 1.98 msaitoh * Tell the upper layer(s) we support long frames.
1423 1.98 msaitoh */
1424 1.98 msaitoh ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1425 1.98 msaitoh
1426 1.98 msaitoh /* Set capability flags */
1427 1.98 msaitoh ifp->if_capabilities |= IFCAP_RXCSUM
1428 1.186 msaitoh | IFCAP_TXCSUM
1429 1.186 msaitoh | IFCAP_TSOv4
1430 1.186 msaitoh | IFCAP_TSOv6;
1431 1.98 msaitoh ifp->if_capenable = 0;
1432 1.98 msaitoh
1433 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1434 1.186 msaitoh | ETHERCAP_VLAN_HWCSUM
1435 1.186 msaitoh | ETHERCAP_JUMBO_MTU
1436 1.186 msaitoh | ETHERCAP_VLAN_MTU;
1437 1.98 msaitoh
1438 1.98 msaitoh /* Enable the above capabilities by default */
1439 1.98 msaitoh ec->ec_capenable = ec->ec_capabilities;
1440 1.98 msaitoh
1441 1.347 yamaguch ether_ifattach(ifp, sc->hw.mac.addr);
1442 1.347 yamaguch aprint_normal_dev(dev, "Ethernet address %s\n",
1443 1.347 yamaguch ether_sprintf(sc->hw.mac.addr));
1444 1.347 yamaguch ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1445 1.347 yamaguch
1446 1.98 msaitoh /*
1447 1.99 msaitoh 	 * Don't turn this on by default: if VLANs are
1448 1.99 msaitoh 	 * created on another pseudo device (e.g. lagg),
1449 1.99 msaitoh 	 * VLAN events are not passed through and operation
1450 1.99 msaitoh 	 * breaks, but with HW FILTER off it works. If you
1451 1.99 msaitoh 	 * use VLANs directly on the ixgbe driver, you can
1452 1.99 msaitoh 	 * enable this and get full hardware tag filtering.
1453 1.99 msaitoh */
1454 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1455 1.1 dyoung
1456 1.98 msaitoh /*
1457 1.98 msaitoh * Specify the media types supported by this adapter and register
1458 1.98 msaitoh * callbacks to update media and link information
1459 1.98 msaitoh */
1460 1.333 msaitoh ec->ec_ifmedia = &sc->media;
1461 1.333 msaitoh ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixgbe_media_change,
1462 1.333 msaitoh ixgbe_media_status, &sc->core_mtx);
1463 1.45 msaitoh
1464 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1465 1.333 msaitoh ixgbe_add_media_types(sc);
1466 1.49 msaitoh
1467 1.98 msaitoh /* Set autoselect media by default */
1468 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1469 1.1 dyoung
1470 1.156 ozaki if_register(ifp);
1471 1.156 ozaki
1472 1.98 msaitoh return (0);
1473 1.99 msaitoh } /* ixgbe_setup_interface */
1474 1.1 dyoung
1475 1.99 msaitoh /************************************************************************
1476 1.99 msaitoh * ixgbe_add_media_types
1477 1.99 msaitoh ************************************************************************/
1478 1.98 msaitoh static void
1479 1.333 msaitoh ixgbe_add_media_types(struct ixgbe_softc *sc)
1480 1.98 msaitoh {
1481 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1482 1.186 msaitoh u64 layer;
1483 1.1 dyoung
1484 1.333 msaitoh layer = sc->phy_layer;
1485 1.1 dyoung
1486 1.98 msaitoh #define ADD(mm, dd) \
1487 1.333 msaitoh ifmedia_add(&sc->media, IFM_ETHER | (mm), (dd), NULL);
1488 1.1 dyoung
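	/*
	 * IFM_NONE is registered as well, presumably so that no media can
	 * be selected to keep the link down.
	 */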
1489 1.140 msaitoh ADD(IFM_NONE, 0);
1490 1.140 msaitoh
1491 1.98 msaitoh /* Media types with matching NetBSD media defines */
1492 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1493 1.98 msaitoh ADD(IFM_10G_T | IFM_FDX, 0);
1494 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1495 1.98 msaitoh ADD(IFM_1000_T | IFM_FDX, 0);
1496 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1497 1.98 msaitoh ADD(IFM_100_TX | IFM_FDX, 0);
1498 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1499 1.99 msaitoh ADD(IFM_10_T | IFM_FDX, 0);
1500 1.26 msaitoh
1501 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1502 1.319 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1503 1.98 msaitoh ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1504 1.1 dyoung
1505 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1506 1.98 msaitoh ADD(IFM_10G_LR | IFM_FDX, 0);
1507 1.319 msaitoh if (hw->phy.multispeed_fiber)
1508 1.98 msaitoh ADD(IFM_1000_LX | IFM_FDX, 0);
1509 1.98 msaitoh }
1510 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1511 1.98 msaitoh ADD(IFM_10G_SR | IFM_FDX, 0);
1512 1.319 msaitoh if (hw->phy.multispeed_fiber)
1513 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1514 1.319 msaitoh } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1515 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1516 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1517 1.98 msaitoh ADD(IFM_10G_CX4 | IFM_FDX, 0);
1518 1.1 dyoung
1519 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1520 1.98 msaitoh ADD(IFM_10G_KR | IFM_FDX, 0);
1521 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1522 1.180 msaitoh ADD(IFM_10G_KX4 | IFM_FDX, 0);
1523 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1524 1.98 msaitoh ADD(IFM_1000_KX | IFM_FDX, 0);
1525 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1526 1.99 msaitoh ADD(IFM_2500_KX | IFM_FDX, 0);
1527 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1528 1.103 msaitoh ADD(IFM_2500_T | IFM_FDX, 0);
1529 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1530 1.103 msaitoh ADD(IFM_5000_T | IFM_FDX, 0);
1531 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1532 1.208 msaitoh ADD(IFM_1000_BX10 | IFM_FDX, 0);
1533 1.98 msaitoh /* XXX no ifmedia_set? */
1534 1.185 msaitoh
1535 1.98 msaitoh ADD(IFM_AUTO, 0);
1536 1.98 msaitoh
1537 1.98 msaitoh #undef ADD
1538 1.99 msaitoh } /* ixgbe_add_media_types */
1539 1.1 dyoung
1540 1.99 msaitoh /************************************************************************
1541 1.99 msaitoh * ixgbe_is_sfp
1542 1.99 msaitoh ************************************************************************/
1543 1.99 msaitoh static inline bool
1544 1.99 msaitoh ixgbe_is_sfp(struct ixgbe_hw *hw)
1545 1.99 msaitoh {
1546 1.99 msaitoh switch (hw->mac.type) {
1547 1.99 msaitoh case ixgbe_mac_82598EB:
1548 1.99 msaitoh if (hw->phy.type == ixgbe_phy_nl)
1549 1.144 msaitoh return (TRUE);
1550 1.144 msaitoh return (FALSE);
1551 1.99 msaitoh case ixgbe_mac_82599EB:
1552 1.203 msaitoh case ixgbe_mac_X550EM_x:
1553 1.203 msaitoh case ixgbe_mac_X550EM_a:
1554 1.99 msaitoh switch (hw->mac.ops.get_media_type(hw)) {
1555 1.99 msaitoh case ixgbe_media_type_fiber:
1556 1.99 msaitoh case ixgbe_media_type_fiber_qsfp:
1557 1.144 msaitoh return (TRUE);
1558 1.99 msaitoh default:
1559 1.144 msaitoh return (FALSE);
1560 1.99 msaitoh }
1561 1.99 msaitoh default:
1562 1.144 msaitoh return (FALSE);
1563 1.99 msaitoh }
1564 1.99 msaitoh } /* ixgbe_is_sfp */
1565 1.99 msaitoh
1566 1.226 thorpej static void
1567 1.333 msaitoh ixgbe_schedule_admin_tasklet(struct ixgbe_softc *sc)
1568 1.226 thorpej {
1569 1.243 msaitoh
1570 1.333 msaitoh KASSERT(mutex_owned(&sc->admin_mtx));
1571 1.260 knakahar
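	/*
	 * Enqueue the admin work at most once: admin_pending stays set
	 * while a request is outstanding. Do nothing if a detach has
	 * started.
	 */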
1572 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
1573 1.333 msaitoh if (sc->admin_pending == 0)
1574 1.333 msaitoh workqueue_enqueue(sc->admin_wq,
1575 1.333 msaitoh &sc->admin_wc, NULL);
1576 1.333 msaitoh sc->admin_pending = 1;
1577 1.255 msaitoh }
1578 1.226 thorpej }
1579 1.226 thorpej
1580 1.99 msaitoh /************************************************************************
1581 1.99 msaitoh * ixgbe_config_link
1582 1.99 msaitoh ************************************************************************/
1583 1.98 msaitoh static void
1584 1.333 msaitoh ixgbe_config_link(struct ixgbe_softc *sc)
1585 1.98 msaitoh {
1586 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1587 1.186 msaitoh u32 autoneg, err = 0;
1588 1.233 msaitoh u32 task_requests = 0;
1589 1.186 msaitoh bool sfp, negotiate = false;
1590 1.1 dyoung
1591 1.98 msaitoh sfp = ixgbe_is_sfp(hw);
1592 1.1 dyoung
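	/*
	 * For SFP(+) ports, hand module and multispeed-fiber setup to the
	 * admin workqueue; for other media, configure the link directly.
	 */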
1593 1.185 msaitoh if (sfp) {
1594 1.99 msaitoh if (hw->phy.multispeed_fiber) {
1595 1.99 msaitoh ixgbe_enable_tx_laser(hw);
1596 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1597 1.99 msaitoh }
1598 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1599 1.260 knakahar
1600 1.333 msaitoh mutex_enter(&sc->admin_mtx);
1601 1.333 msaitoh sc->task_requests |= task_requests;
1602 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
1603 1.333 msaitoh mutex_exit(&sc->admin_mtx);
1604 1.98 msaitoh } else {
1605 1.333 msaitoh struct ifmedia *ifm = &sc->media;
1606 1.143 msaitoh
1607 1.98 msaitoh if (hw->mac.ops.check_link)
1608 1.333 msaitoh err = ixgbe_check_link(hw, &sc->link_speed,
1609 1.333 msaitoh &sc->link_up, FALSE);
1610 1.98 msaitoh if (err)
1611 1.144 msaitoh return;
1612 1.143 msaitoh
1613 1.143 msaitoh /*
1614 1.143 msaitoh 		 * If this is the first call, get the value to use
1615 1.143 msaitoh 		 * for autonegotiation.
1616 1.143 msaitoh */
1617 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
1618 1.143 msaitoh if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1619 1.143 msaitoh && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1620 1.186 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1621 1.99 msaitoh &negotiate);
1622 1.98 msaitoh if (err)
1623 1.144 msaitoh return;
1624 1.98 msaitoh if (hw->mac.ops.setup_link)
1625 1.186 msaitoh err = hw->mac.ops.setup_link(hw, autoneg,
1626 1.333 msaitoh sc->link_up);
1627 1.98 msaitoh }
1628 1.99 msaitoh } /* ixgbe_config_link */
1629 1.98 msaitoh
1630 1.99 msaitoh /************************************************************************
1631 1.99 msaitoh * ixgbe_update_stats_counters - Update board statistics counters.
1632 1.99 msaitoh ************************************************************************/
1633 1.98 msaitoh static void
1634 1.333 msaitoh ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1635 1.1 dyoung {
1636 1.333 msaitoh struct ifnet *ifp = sc->ifp;
1637 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1638 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1639 1.305 msaitoh u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
1640 1.349 msaitoh u64 total, total_missed_rx = 0, total_qprdc = 0;
1641 1.303 msaitoh uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
1642 1.186 msaitoh unsigned int queue_counters;
1643 1.176 msaitoh int i;
1644 1.44 msaitoh
1645 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
1646 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
1647 1.303 msaitoh
1648 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
1649 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
1650 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
1651 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
1652 1.44 msaitoh
1653 1.176 msaitoh /* 16 registers exist */
1654 1.333 msaitoh queue_counters = uimin(__arraycount(stats->qprc), sc->num_queues);
1655 1.176 msaitoh for (i = 0; i < queue_counters; i++) {
1656 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
1657 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
1658 1.329 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1659 1.349 msaitoh uint32_t qprdc;
1660 1.349 msaitoh
1661 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbrc[i],
1662 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) +
1663 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32));
1664 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbtc[i],
1665 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) +
1666 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32));
1667 1.349 msaitoh /* QPRDC will be added to iqdrops. */
1668 1.349 msaitoh qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1669 1.349 msaitoh IXGBE_EVC_ADD(&stats->qprdc[i], qprdc);
1670 1.349 msaitoh total_qprdc += qprdc;
1671 1.329 msaitoh } else {
1672 1.329 msaitoh /* 82598 */
1673 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]);
1674 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBTC(i), qbtc[i]);
1675 1.329 msaitoh }
1676 1.98 msaitoh }
1677 1.151 msaitoh
1678 1.175 msaitoh /* 8 registers exist */
1679 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1680 1.98 msaitoh uint32_t mp;
1681 1.44 msaitoh
1682 1.151 msaitoh /* MPC */
1683 1.98 msaitoh mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1684 1.98 msaitoh /* global total per queue */
1685 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpc[i], mp);
1686 1.98 msaitoh /* running comprehensive total for stats display */
1687 1.98 msaitoh total_missed_rx += mp;
1688 1.44 msaitoh
1689 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1690 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
1691 1.151 msaitoh
1692 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
1693 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
1694 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1695 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1696 1.319 msaitoh IXGBE_PXONRXCNT(i), pxonrxc[i]);
1697 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1698 1.319 msaitoh IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
1699 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1700 1.319 msaitoh IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
1701 1.151 msaitoh } else {
1702 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1703 1.319 msaitoh IXGBE_PXONRXC(i), pxonrxc[i]);
1704 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1705 1.319 msaitoh IXGBE_PXOFFRXC(i), pxoffrxc[i]);
1706 1.151 msaitoh }
1707 1.98 msaitoh }
1708 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
1709 1.44 msaitoh
1710 1.98 msaitoh 	/* Datasheet: M[LR]FC are valid only when the link is up at 10Gbps */
1711 1.333 msaitoh if ((sc->link_active == LINK_STATE_UP)
1712 1.333 msaitoh && (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1713 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
1714 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
1715 1.98 msaitoh }
1716 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
1717 1.326 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LINK_DN_CNT, link_dn_cnt);
1718 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
1719 1.44 msaitoh
1720 1.98 msaitoh /* Hardware workaround, gprc counts missed packets */
1721 1.305 msaitoh IXGBE_EVC_ADD(&stats->gprc,
1722 1.305 msaitoh IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
1723 1.44 msaitoh
1724 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
1725 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
1726 1.305 msaitoh total = lxontxc + lxofftxc;
1727 1.44 msaitoh
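	/*
	 * The good octet/packet TX counters also include the pause frames
	 * counted above, so subtract "total" (scaled by the 64-byte pause
	 * frame size for the octet counters) to count data traffic only.
	 */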
1728 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1729 1.305 msaitoh IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
1730 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
1731 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1732 1.280 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1733 1.305 msaitoh - total * ETHER_MIN_LEN);
1734 1.305 msaitoh IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
1735 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
1736 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
1737 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
1738 1.98 msaitoh } else {
1739 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
1740 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
1741 1.98 msaitoh /* 82598 only has a counter in the high register */
1742 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
1743 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
1744 1.305 msaitoh - total * ETHER_MIN_LEN);
1745 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
1746 1.98 msaitoh }
1747 1.44 msaitoh
1748 1.98 msaitoh /*
1749 1.98 msaitoh * Workaround: mprc hardware is incorrectly counting
1750 1.98 msaitoh * broadcasts, so for now we subtract those.
1751 1.98 msaitoh */
1752 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
1753 1.305 msaitoh IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
1754 1.305 msaitoh - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
1755 1.305 msaitoh
1756 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
1757 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
1758 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
1759 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
1760 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
1761 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
1762 1.305 msaitoh
1763 1.305 msaitoh IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
1764 1.305 msaitoh IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
1765 1.305 msaitoh IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
1766 1.305 msaitoh
1767 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
1768 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
1769 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
1770 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
1771 1.305 msaitoh
1772 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
1773 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
1774 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
1775 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
1776 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
1777 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
1778 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
1779 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
1780 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
1781 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
1782 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
1783 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
1784 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
1785 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
1786 1.98 msaitoh 	/* Only read FCoE counters on 82599 and newer (not on 82598) */
1787 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1788 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
1789 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
1790 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
1791 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
1792 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
1793 1.98 msaitoh }
1794 1.44 msaitoh
1795 1.44 msaitoh /*
1796 1.224 msaitoh * Fill out the OS statistics structure. Only RX errors are required
1797 1.224 msaitoh * here because all TX counters are incremented in the TX path and
1798 1.224 msaitoh * normal RX counters are prepared in ether_input().
1799 1.44 msaitoh */
1800 1.222 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1801 1.352 riastrad if_statadd_ref(ifp, nsr, if_iqdrops, total_missed_rx + total_qprdc);
1802 1.298 msaitoh
1803 1.298 msaitoh /*
1804 1.298 msaitoh * Aggregate following types of errors as RX errors:
1805 1.298 msaitoh * - CRC error count,
1806 1.298 msaitoh * - illegal byte error count,
1807 1.298 msaitoh * - length error count,
1808 1.298 msaitoh * - undersized packets count,
1809 1.298 msaitoh * - fragmented packets count,
1810 1.298 msaitoh * - oversized packets count,
1811 1.298 msaitoh * - jabber count.
1812 1.298 msaitoh */
1813 1.352 riastrad if_statadd_ref(ifp, nsr, if_ierrors,
1814 1.303 msaitoh crcerrs + illerrc + rlec + ruc + rfc + roc + rjc);
1815 1.298 msaitoh
1816 1.222 thorpej IF_STAT_PUTREF(ifp);
1817 1.99 msaitoh } /* ixgbe_update_stats_counters */
1818 1.1 dyoung
1819 1.99 msaitoh /************************************************************************
1820 1.99 msaitoh * ixgbe_add_hw_stats
1821 1.99 msaitoh *
1822 1.99 msaitoh * Add sysctl variables, one per statistic, to the system.
1823 1.99 msaitoh ************************************************************************/
1824 1.98 msaitoh static void
1825 1.333 msaitoh ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1826 1.1 dyoung {
1827 1.333 msaitoh device_t dev = sc->dev;
1828 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
1829 1.333 msaitoh struct sysctllog **log = &sc->sysctllog;
1830 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
1831 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
1832 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1833 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1834 1.98 msaitoh const char *xname = device_xname(dev);
1835 1.144 msaitoh int i;
1836 1.1 dyoung
1837 1.98 msaitoh /* Driver Statistics */
1838 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1839 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG");
1840 1.333 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1841 1.98 msaitoh NULL, xname, "m_defrag() failed");
1842 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1843 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG");
1844 1.333 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1845 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL");
1846 1.333 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
1847 1.98 msaitoh NULL, xname, "Driver tx dma hard fail other");
1848 1.333 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1849 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN");
1850 1.333 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1851 1.98 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM");
1852 1.333 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
1853 1.98 msaitoh NULL, xname, "Watchdog timeouts");
1854 1.333 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
1855 1.98 msaitoh NULL, xname, "TSO errors");
1856 1.333 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
1857 1.233 msaitoh NULL, xname, "Admin MSI-X IRQ Handled");
1858 1.333 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
1859 1.233 msaitoh NULL, xname, "Link event");
1860 1.333 msaitoh evcnt_attach_dynamic(&sc->mod_workev, EVCNT_TYPE_INTR,
1861 1.233 msaitoh NULL, xname, "SFP+ module event");
1862 1.333 msaitoh evcnt_attach_dynamic(&sc->msf_workev, EVCNT_TYPE_INTR,
1863 1.233 msaitoh NULL, xname, "Multispeed event");
1864 1.333 msaitoh evcnt_attach_dynamic(&sc->phy_workev, EVCNT_TYPE_INTR,
1865 1.233 msaitoh NULL, xname, "External PHY event");
1866 1.1 dyoung
1867 1.168 msaitoh 	/* The maximum number of traffic classes is 8 */
1868 1.168 msaitoh KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1869 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1870 1.333 msaitoh snprintf(sc->tcs[i].evnamebuf,
1871 1.333 msaitoh sizeof(sc->tcs[i].evnamebuf), "%s tc%d", xname, i);
1872 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
1873 1.168 msaitoh evcnt_attach_dynamic(&stats->mpc[i],
1874 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1875 1.168 msaitoh "RX Missed Packet Count");
1876 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1877 1.168 msaitoh evcnt_attach_dynamic(&stats->rnbc[i],
1878 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1879 1.333 msaitoh sc->tcs[i].evnamebuf,
1880 1.168 msaitoh "Receive No Buffers");
1881 1.168 msaitoh }
1882 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
1883 1.168 msaitoh evcnt_attach_dynamic(&stats->pxontxc[i],
1884 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1885 1.331 msaitoh "Priority XON Transmitted");
1886 1.168 msaitoh evcnt_attach_dynamic(&stats->pxofftxc[i],
1887 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1888 1.331 msaitoh "Priority XOFF Transmitted");
1889 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
1890 1.168 msaitoh evcnt_attach_dynamic(&stats->pxon2offc[i],
1891 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1892 1.333 msaitoh sc->tcs[i].evnamebuf,
1893 1.331 msaitoh "Priority XON to XOFF");
1894 1.330 msaitoh evcnt_attach_dynamic(&stats->pxonrxc[i],
1895 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1896 1.331 msaitoh "Priority XON Received");
1897 1.330 msaitoh evcnt_attach_dynamic(&stats->pxoffrxc[i],
1898 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1899 1.331 msaitoh "Priority XOFF Received");
1900 1.168 msaitoh }
1901 1.168 msaitoh }
1902 1.168 msaitoh
1903 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
1904 1.135 msaitoh #ifdef LRO
1905 1.135 msaitoh struct lro_ctrl *lro = &rxr->lro;
1906 1.327 msaitoh #endif
1907 1.135 msaitoh
1908 1.333 msaitoh snprintf(sc->queues[i].evnamebuf,
1909 1.333 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
1910 1.333 msaitoh snprintf(sc->queues[i].namebuf,
1911 1.333 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i);
1912 1.1 dyoung
1913 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
1914 1.319 msaitoh aprint_error_dev(dev,
1915 1.319 msaitoh "could not create sysctl root\n");
1916 1.98 msaitoh break;
1917 1.98 msaitoh }
1918 1.1 dyoung
1919 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode,
1920 1.98 msaitoh 0, CTLTYPE_NODE,
1921 1.333 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1922 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1923 1.98 msaitoh break;
1924 1.23 msaitoh
1925 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1926 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
1927 1.98 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1928 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler, 0,
1929 1.333 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1930 1.98 msaitoh break;
1931 1.1 dyoung
1932 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1933 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1934 1.98 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1935 1.98 msaitoh ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1936 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1937 1.98 msaitoh break;
1938 1.1 dyoung
1939 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1940 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1941 1.98 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1942 1.98 msaitoh ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1943 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1944 1.98 msaitoh break;
1945 1.1 dyoung
1946 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1947 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1948 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"),
1949 1.280 msaitoh ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1950 1.154 msaitoh CTL_CREATE, CTL_EOL) != 0)
1951 1.154 msaitoh break;
1952 1.154 msaitoh
1953 1.154 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1954 1.287 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1955 1.287 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"),
1956 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1957 1.287 msaitoh CTL_CREATE, CTL_EOL) != 0)
1958 1.287 msaitoh break;
1959 1.287 msaitoh
1960 1.287 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1961 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1962 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Head"),
1963 1.98 msaitoh ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1964 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1965 1.33 msaitoh break;
1966 1.98 msaitoh
1967 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1968 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1969 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"),
1970 1.98 msaitoh ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1971 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1972 1.28 msaitoh break;
1973 1.98 msaitoh
1974 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
1975 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue");
1976 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq,
1977 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
1978 1.327 msaitoh "Handled queue in softint");
1979 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
1980 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint");
1981 1.327 msaitoh if (i < __arraycount(stats->qbtc))
1982 1.327 msaitoh evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1983 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1984 1.328 msaitoh "Queue Bytes Transmitted (reg)");
1985 1.327 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1986 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1987 1.328 msaitoh "Queue Packets Transmitted (soft)");
1988 1.327 msaitoh if (i < __arraycount(stats->qptc))
1989 1.280 msaitoh evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1990 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1991 1.328 msaitoh "Queue Packets Transmitted (reg)");
1992 1.327 msaitoh #ifndef IXGBE_LEGACY_TX
1993 1.327 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1994 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1995 1.327 msaitoh "Packets dropped in pcq");
1996 1.327 msaitoh #endif
1997 1.327 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1998 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1999 1.327 msaitoh "TX Queue No Descriptor Available");
2000 1.327 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2001 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "TSO");
2002 1.327 msaitoh
2003 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2004 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2005 1.328 msaitoh "Queue Bytes Received (soft)");
2006 1.327 msaitoh if (i < __arraycount(stats->qbrc))
2007 1.280 msaitoh evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
2008 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2009 1.328 msaitoh "Queue Bytes Received (reg)");
2010 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2011 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2012 1.328 msaitoh "Queue Packets Received (soft)");
2013 1.327 msaitoh if (i < __arraycount(stats->qprc))
2014 1.327 msaitoh evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
2015 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2016 1.328 msaitoh "Queue Packets Received (reg)");
2017 1.327 msaitoh if ((i < __arraycount(stats->qprdc)) &&
2018 1.327 msaitoh (hw->mac.type >= ixgbe_mac_82599EB))
2019 1.151 msaitoh evcnt_attach_dynamic(&stats->qprdc[i],
2020 1.151 msaitoh EVCNT_TYPE_MISC, NULL,
2021 1.333 msaitoh sc->queues[i].evnamebuf,
2022 1.328 msaitoh "Queue Packets Received Drop");
2023 1.33 msaitoh
2024 1.290 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
2025 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
2026 1.98 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2027 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded");
2028 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2029 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
2030 1.98 msaitoh #ifdef LRO
2031 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2032 1.98 msaitoh CTLFLAG_RD, &lro->lro_queued, 0,
2033 1.98 msaitoh "LRO Queued");
2034 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2035 1.98 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0,
2036 1.98 msaitoh "LRO Flushed");
2037 1.98 msaitoh #endif /* LRO */
2038 1.1 dyoung }
2039 1.28 msaitoh
2040 1.99 msaitoh /* MAC stats get their own sub node */
2041 1.98 msaitoh
2042 1.98 msaitoh snprintf(stats->namebuf,
2043 1.98 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname);
2044 1.98 msaitoh
2045 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2046 1.98 msaitoh stats->namebuf, "rx csum offload - IP");
2047 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2048 1.98 msaitoh stats->namebuf, "rx csum offload - L4");
2049 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2050 1.98 msaitoh stats->namebuf, "rx csum offload - IP bad");
2051 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2052 1.98 msaitoh stats->namebuf, "rx csum offload - L4 bad");
2053 1.98 msaitoh evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
2054 1.98 msaitoh stats->namebuf, "Interrupt conditions zero");
2055 1.98 msaitoh evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
2056 1.98 msaitoh stats->namebuf, "Legacy interrupts");
2057 1.99 msaitoh
2058 1.98 msaitoh evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2059 1.98 msaitoh stats->namebuf, "CRC Errors");
2060 1.98 msaitoh evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2061 1.98 msaitoh stats->namebuf, "Illegal Byte Errors");
2062 1.98 msaitoh evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2063 1.98 msaitoh stats->namebuf, "Byte Errors");
2064 1.98 msaitoh evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2065 1.98 msaitoh stats->namebuf, "MAC Short Packets Discarded");
2066 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2067 1.98 msaitoh evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2068 1.98 msaitoh stats->namebuf, "Bad SFD");
2069 1.98 msaitoh evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2070 1.98 msaitoh stats->namebuf, "Total Packets Missed");
2071 1.98 msaitoh evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2072 1.98 msaitoh stats->namebuf, "MAC Local Faults");
2073 1.98 msaitoh evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2074 1.98 msaitoh stats->namebuf, "MAC Remote Faults");
2075 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2076 1.326 msaitoh evcnt_attach_dynamic(&stats->link_dn_cnt, EVCNT_TYPE_MISC,
2077 1.326 msaitoh NULL, stats->namebuf, "Link down event in the MAC");
2078 1.98 msaitoh evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2079 1.98 msaitoh stats->namebuf, "Receive Length Errors");
2080 1.98 msaitoh evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2081 1.98 msaitoh stats->namebuf, "Link XON Transmitted");
2082 1.330 msaitoh evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2083 1.330 msaitoh stats->namebuf, "Link XOFF Transmitted");
2084 1.98 msaitoh evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2085 1.98 msaitoh stats->namebuf, "Link XON Received");
2086 1.98 msaitoh evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2087 1.98 msaitoh stats->namebuf, "Link XOFF Received");
2088 1.98 msaitoh
2089 1.98 msaitoh /* Packet Reception Stats */
2090 1.98 msaitoh evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2091 1.98 msaitoh stats->namebuf, "Total Octets Received");
2092 1.98 msaitoh evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2093 1.98 msaitoh stats->namebuf, "Good Octets Received");
2094 1.98 msaitoh evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2095 1.98 msaitoh stats->namebuf, "Total Packets Received");
2096 1.98 msaitoh evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2097 1.98 msaitoh stats->namebuf, "Good Packets Received");
2098 1.98 msaitoh evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2099 1.98 msaitoh stats->namebuf, "Multicast Packets Received");
2100 1.98 msaitoh evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2101 1.98 msaitoh stats->namebuf, "Broadcast Packets Received");
2102 1.98 msaitoh evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2103 1.98 msaitoh stats->namebuf, "64 byte frames received ");
2104 1.98 msaitoh evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2105 1.98 msaitoh stats->namebuf, "65-127 byte frames received");
2106 1.98 msaitoh evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2107 1.98 msaitoh stats->namebuf, "128-255 byte frames received");
2108 1.98 msaitoh evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2109 1.98 msaitoh stats->namebuf, "256-511 byte frames received");
2110 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2111 1.98 msaitoh stats->namebuf, "512-1023 byte frames received");
2112 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2113 1.98 msaitoh 	    stats->namebuf, "1024-1522 byte frames received");
2114 1.98 msaitoh evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2115 1.98 msaitoh stats->namebuf, "Receive Undersized");
2116 1.98 msaitoh evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2117 1.98 msaitoh stats->namebuf, "Fragmented Packets Received ");
2118 1.98 msaitoh evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2119 1.98 msaitoh stats->namebuf, "Oversized Packets Received");
2120 1.98 msaitoh evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2121 1.98 msaitoh stats->namebuf, "Received Jabber");
2122 1.98 msaitoh evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2123 1.98 msaitoh stats->namebuf, "Management Packets Received");
2124 1.98 msaitoh evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2125 1.98 msaitoh stats->namebuf, "Management Packets Dropped");
2126 1.98 msaitoh evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2127 1.98 msaitoh stats->namebuf, "Checksum Errors");
2128 1.1 dyoung
2129 1.98 msaitoh /* Packet Transmission Stats */
2130 1.98 msaitoh evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2131 1.98 msaitoh stats->namebuf, "Good Octets Transmitted");
2132 1.98 msaitoh evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2133 1.98 msaitoh stats->namebuf, "Total Packets Transmitted");
2134 1.98 msaitoh evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2135 1.98 msaitoh stats->namebuf, "Good Packets Transmitted");
2136 1.98 msaitoh evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2137 1.98 msaitoh stats->namebuf, "Broadcast Packets Transmitted");
2138 1.98 msaitoh evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2139 1.98 msaitoh stats->namebuf, "Multicast Packets Transmitted");
2140 1.98 msaitoh evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2141 1.98 msaitoh stats->namebuf, "Management Packets Transmitted");
2142 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2143 1.98 msaitoh stats->namebuf, "64 byte frames transmitted ");
2144 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2145 1.98 msaitoh stats->namebuf, "65-127 byte frames transmitted");
2146 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2147 1.98 msaitoh stats->namebuf, "128-255 byte frames transmitted");
2148 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2149 1.98 msaitoh stats->namebuf, "256-511 byte frames transmitted");
2150 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2151 1.98 msaitoh stats->namebuf, "512-1023 byte frames transmitted");
2152 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2153 1.98 msaitoh stats->namebuf, "1024-1522 byte frames transmitted");
2154 1.99 msaitoh } /* ixgbe_add_hw_stats */
2155 1.48 msaitoh
2156 1.1 dyoung static void
2157 1.333 msaitoh ixgbe_clear_evcnt(struct ixgbe_softc *sc)
2158 1.1 dyoung {
2159 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
2160 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
2161 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2162 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
2163 1.168 msaitoh int i;
2164 1.98 msaitoh
2165 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
2166 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
2167 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
2168 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
2169 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
2170 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
2171 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
2172 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0);
2173 1.333 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0);
2174 1.333 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0);
2175 1.333 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0);
2176 1.333 msaitoh IXGBE_EVC_STORE(&sc->mod_workev, 0);
2177 1.333 msaitoh IXGBE_EVC_STORE(&sc->msf_workev, 0);
2178 1.333 msaitoh IXGBE_EVC_STORE(&sc->phy_workev, 0);
2179 1.98 msaitoh
2180 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2181 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
2182 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpc[i], 0);
2183 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
2184 1.305 msaitoh IXGBE_EVC_STORE(&stats->rnbc[i], 0);
2185 1.168 msaitoh }
2186 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
2187 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
2188 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
2189 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
2190 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
2191 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2192 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
2193 1.168 msaitoh }
2194 1.168 msaitoh }
2195 1.168 msaitoh
2196 1.333 msaitoh txr = sc->tx_rings;
2197 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2198 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
2199 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
2200 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0);
2201 1.305 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0);
2202 1.98 msaitoh #ifndef IXGBE_LEGACY_TX
2203 1.305 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0);
2204 1.45 msaitoh #endif
2205 1.327 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
2206 1.327 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0);
2207 1.134 msaitoh txr->q_efbig_tx_dma_setup = 0;
2208 1.134 msaitoh txr->q_mbuf_defrag_failed = 0;
2209 1.134 msaitoh txr->q_efbig2_tx_dma_setup = 0;
2210 1.134 msaitoh txr->q_einval_tx_dma_setup = 0;
2211 1.134 msaitoh txr->q_other_tx_dma_setup = 0;
2212 1.134 msaitoh txr->q_eagain_tx_dma_setup = 0;
2213 1.134 msaitoh txr->q_enomem_tx_dma_setup = 0;
2214 1.134 msaitoh txr->q_tso_err = 0;
2215 1.1 dyoung
2216 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
2217 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprc[i], 0);
2218 1.305 msaitoh IXGBE_EVC_STORE(&stats->qptc[i], 0);
2219 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbrc[i], 0);
2220 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbtc[i], 0);
2221 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2222 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprdc[i], 0);
2223 1.98 msaitoh }
2224 1.98 msaitoh
2225 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0);
2226 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
2227 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0);
2228 1.305 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
2229 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
2230 1.305 msaitoh }
2231 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0);
2232 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0);
2233 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
2234 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
2235 1.305 msaitoh IXGBE_EVC_STORE(&stats->intzero, 0);
2236 1.305 msaitoh IXGBE_EVC_STORE(&stats->legint, 0);
2237 1.305 msaitoh IXGBE_EVC_STORE(&stats->crcerrs, 0);
2238 1.305 msaitoh IXGBE_EVC_STORE(&stats->illerrc, 0);
2239 1.305 msaitoh IXGBE_EVC_STORE(&stats->errbc, 0);
2240 1.305 msaitoh IXGBE_EVC_STORE(&stats->mspdc, 0);
2241 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2242 1.305 msaitoh IXGBE_EVC_STORE(&stats->mbsdc, 0);
2243 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpctotal, 0);
2244 1.305 msaitoh IXGBE_EVC_STORE(&stats->mlfc, 0);
2245 1.305 msaitoh IXGBE_EVC_STORE(&stats->mrfc, 0);
2246 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2247 1.326 msaitoh IXGBE_EVC_STORE(&stats->link_dn_cnt, 0);
2248 1.305 msaitoh IXGBE_EVC_STORE(&stats->rlec, 0);
2249 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxontxc, 0);
2250 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxonrxc, 0);
2251 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxofftxc, 0);
2252 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxoffrxc, 0);
2253 1.98 msaitoh
2254 1.98 msaitoh /* Packet Reception Stats */
2255 1.305 msaitoh IXGBE_EVC_STORE(&stats->tor, 0);
2256 1.305 msaitoh IXGBE_EVC_STORE(&stats->gorc, 0);
2257 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpr, 0);
2258 1.305 msaitoh IXGBE_EVC_STORE(&stats->gprc, 0);
2259 1.305 msaitoh IXGBE_EVC_STORE(&stats->mprc, 0);
2260 1.305 msaitoh IXGBE_EVC_STORE(&stats->bprc, 0);
2261 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc64, 0);
2262 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc127, 0);
2263 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc255, 0);
2264 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc511, 0);
2265 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1023, 0);
2266 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1522, 0);
2267 1.305 msaitoh IXGBE_EVC_STORE(&stats->ruc, 0);
2268 1.305 msaitoh IXGBE_EVC_STORE(&stats->rfc, 0);
2269 1.305 msaitoh IXGBE_EVC_STORE(&stats->roc, 0);
2270 1.305 msaitoh IXGBE_EVC_STORE(&stats->rjc, 0);
2271 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngprc, 0);
2272 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngpdc, 0);
2273 1.305 msaitoh IXGBE_EVC_STORE(&stats->xec, 0);
2274 1.98 msaitoh
2275 1.98 msaitoh /* Packet Transmission Stats */
2276 1.305 msaitoh IXGBE_EVC_STORE(&stats->gotc, 0);
2277 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpt, 0);
2278 1.305 msaitoh IXGBE_EVC_STORE(&stats->gptc, 0);
2279 1.305 msaitoh IXGBE_EVC_STORE(&stats->bptc, 0);
2280 1.305 msaitoh IXGBE_EVC_STORE(&stats->mptc, 0);
2281 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngptc, 0);
2282 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc64, 0);
2283 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc127, 0);
2284 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc255, 0);
2285 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc511, 0);
2286 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1023, 0);
2287 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1522, 0);
2288 1.98 msaitoh }
2289 1.98 msaitoh
2290 1.99 msaitoh /************************************************************************
2291 1.99 msaitoh * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2292 1.99 msaitoh *
2293 1.99 msaitoh * Retrieves the TDH value from the hardware
2294 1.99 msaitoh ************************************************************************/
2295 1.185 msaitoh static int
2296 1.98 msaitoh ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2297 1.98 msaitoh {
2298 1.98 msaitoh struct sysctlnode node = *rnode;
2299 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2300 1.333 msaitoh struct ixgbe_softc *sc;
2301 1.98 msaitoh uint32_t val;
2302 1.98 msaitoh
2303 1.99 msaitoh if (!txr)
2304 1.99 msaitoh return (0);
2305 1.99 msaitoh
2306 1.333 msaitoh sc = txr->sc;
2307 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2308 1.169 msaitoh return (EPERM);
2309 1.169 msaitoh
2310 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->me));
2311 1.98 msaitoh node.sysctl_data = &val;
2312 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2313 1.99 msaitoh } /* ixgbe_sysctl_tdh_handler */
2314 1.98 msaitoh
2315 1.99 msaitoh /************************************************************************
2316 1.99 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2317 1.99 msaitoh *
2318 1.99 msaitoh * Retrieves the TDT value from the hardware
2319 1.99 msaitoh ************************************************************************/
2320 1.185 msaitoh static int
2321 1.98 msaitoh ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2322 1.98 msaitoh {
2323 1.98 msaitoh struct sysctlnode node = *rnode;
2324 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2325 1.333 msaitoh struct ixgbe_softc *sc;
2326 1.98 msaitoh uint32_t val;
2327 1.1 dyoung
2328 1.99 msaitoh if (!txr)
2329 1.99 msaitoh return (0);
2330 1.99 msaitoh
2331 1.333 msaitoh sc = txr->sc;
2332 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2333 1.169 msaitoh return (EPERM);
2334 1.169 msaitoh
2335 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->me));
2336 1.98 msaitoh node.sysctl_data = &val;
2337 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2338 1.99 msaitoh } /* ixgbe_sysctl_tdt_handler */
2339 1.45 msaitoh
2340 1.99 msaitoh /************************************************************************
2341 1.154 msaitoh * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2342 1.154 msaitoh * handler function
2343 1.154 msaitoh *
2344 1.154 msaitoh * Retrieves the next_to_check value
2345 1.154 msaitoh ************************************************************************/
2346 1.185 msaitoh static int
2347 1.154 msaitoh ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2348 1.154 msaitoh {
2349 1.154 msaitoh struct sysctlnode node = *rnode;
2350 1.154 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2351 1.333 msaitoh struct ixgbe_softc *sc;
2352 1.154 msaitoh uint32_t val;
2353 1.154 msaitoh
2354 1.154 msaitoh if (!rxr)
2355 1.154 msaitoh return (0);
2356 1.154 msaitoh
2357 1.333 msaitoh sc = rxr->sc;
2358 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2359 1.169 msaitoh return (EPERM);
2360 1.169 msaitoh
2361 1.154 msaitoh val = rxr->next_to_check;
2362 1.154 msaitoh node.sysctl_data = &val;
2363 1.154 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2364 1.154 msaitoh } /* ixgbe_sysctl_next_to_check_handler */
2365 1.154 msaitoh
2366 1.154 msaitoh /************************************************************************
2367 1.287 msaitoh  * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2368 1.287 msaitoh  * handler function
2369 1.287 msaitoh *
2370 1.287 msaitoh * Retrieves the next_to_refresh value
2371 1.287 msaitoh ************************************************************************/
2372 1.287 msaitoh static int
2373 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2374 1.287 msaitoh {
2375 1.287 msaitoh struct sysctlnode node = *rnode;
2376 1.287 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2377 1.333 msaitoh struct ixgbe_softc *sc;
2378 1.287 msaitoh uint32_t val;
2379 1.287 msaitoh
2380 1.287 msaitoh if (!rxr)
2381 1.287 msaitoh return (0);
2382 1.287 msaitoh
2383 1.333 msaitoh sc = rxr->sc;
2384 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2385 1.287 msaitoh return (EPERM);
2386 1.287 msaitoh
2387 1.287 msaitoh val = rxr->next_to_refresh;
2388 1.287 msaitoh node.sysctl_data = &val;
2389 1.287 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2390 1.287 msaitoh } /* ixgbe_sysctl_next_to_refresh_handler */
2391 1.287 msaitoh
2392 1.287 msaitoh /************************************************************************
2393 1.99 msaitoh * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2394 1.99 msaitoh *
2395 1.99 msaitoh * Retrieves the RDH value from the hardware
2396 1.99 msaitoh ************************************************************************/
2397 1.185 msaitoh static int
2398 1.98 msaitoh ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2399 1.98 msaitoh {
2400 1.98 msaitoh struct sysctlnode node = *rnode;
2401 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2402 1.333 msaitoh struct ixgbe_softc *sc;
2403 1.98 msaitoh uint32_t val;
2404 1.1 dyoung
2405 1.99 msaitoh if (!rxr)
2406 1.99 msaitoh return (0);
2407 1.99 msaitoh
2408 1.333 msaitoh sc = rxr->sc;
2409 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2410 1.169 msaitoh return (EPERM);
2411 1.169 msaitoh
2412 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDH(rxr->me));
2413 1.98 msaitoh node.sysctl_data = &val;
2414 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2415 1.99 msaitoh } /* ixgbe_sysctl_rdh_handler */
2416 1.1 dyoung
2417 1.99 msaitoh /************************************************************************
2418 1.99 msaitoh * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2419 1.99 msaitoh *
2420 1.99 msaitoh * Retrieves the RDT value from the hardware
2421 1.99 msaitoh ************************************************************************/
2422 1.185 msaitoh static int
2423 1.98 msaitoh ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2424 1.98 msaitoh {
2425 1.98 msaitoh struct sysctlnode node = *rnode;
2426 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2427 1.333 msaitoh struct ixgbe_softc *sc;
2428 1.98 msaitoh uint32_t val;
2429 1.1 dyoung
2430 1.99 msaitoh if (!rxr)
2431 1.99 msaitoh return (0);
2432 1.99 msaitoh
2433 1.333 msaitoh sc = rxr->sc;
2434 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2435 1.169 msaitoh return (EPERM);
2436 1.169 msaitoh
2437 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDT(rxr->me));
2438 1.98 msaitoh node.sysctl_data = &val;
2439 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2440 1.99 msaitoh } /* ixgbe_sysctl_rdt_handler */
2441 1.1 dyoung
2442 1.193 msaitoh static int
2443 1.193 msaitoh ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2444 1.193 msaitoh {
2445 1.193 msaitoh struct ifnet *ifp = &ec->ec_if;
2446 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2447 1.193 msaitoh int rv;
2448 1.193 msaitoh
2449 1.193 msaitoh if (set)
2450 1.333 msaitoh rv = ixgbe_register_vlan(sc, vid);
2451 1.193 msaitoh else
2452 1.333 msaitoh rv = ixgbe_unregister_vlan(sc, vid);
2453 1.193 msaitoh
2454 1.200 msaitoh if (rv != 0)
2455 1.200 msaitoh return rv;
2456 1.200 msaitoh
2457 1.200 msaitoh /*
2458 1.200 msaitoh 	 * Control VLAN HW tagging when ec_nvlans is changed from 1 to 0
2459 1.200 msaitoh 	 * or from 0 to 1.
2460 1.200 msaitoh */
2461 1.200 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2462 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2463 1.200 msaitoh
2464 1.193 msaitoh return rv;
2465 1.193 msaitoh }
2466 1.193 msaitoh
2467 1.99 msaitoh /************************************************************************
2468 1.99 msaitoh * ixgbe_register_vlan
2469 1.99 msaitoh *
2470 1.99 msaitoh * Run via vlan config EVENT, it enables us to use the
2471 1.99 msaitoh * HW Filter table since we can get the vlan id. This
2472 1.99 msaitoh * just creates the entry in the soft version of the
2473 1.99 msaitoh * VFTA, init will repopulate the real table.
2474 1.99 msaitoh ************************************************************************/
2475 1.193 msaitoh static int
2476 1.333 msaitoh ixgbe_register_vlan(struct ixgbe_softc *sc, u16 vtag)
2477 1.98 msaitoh {
2478 1.98 msaitoh u16 index, bit;
2479 1.193 msaitoh int error;
2480 1.48 msaitoh
2481 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2482 1.193 msaitoh return EINVAL;
2483 1.1 dyoung
2484 1.333 msaitoh IXGBE_CORE_LOCK(sc);
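	/*
	 * The soft VFTA mirrors the hardware's 4096-entry VLAN filter
	 * table as 128 32-bit words.  E.g. vtag 100 gives
	 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4,
	 * so bit 4 of shadow_vfta[3] is set below.
	 */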
2485 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2486 1.98 msaitoh bit = vtag & 0x1F;
2487 1.333 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit);
2488 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, true,
2489 1.193 msaitoh true);
2490 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2491 1.193 msaitoh if (error != 0)
2492 1.193 msaitoh error = EACCES;
2493 1.193 msaitoh
2494 1.193 msaitoh return error;
2495 1.99 msaitoh } /* ixgbe_register_vlan */
2496 1.1 dyoung
2497 1.99 msaitoh /************************************************************************
2498 1.99 msaitoh * ixgbe_unregister_vlan
2499 1.99 msaitoh *
2500 1.99 msaitoh * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2501 1.99 msaitoh ************************************************************************/
2502 1.193 msaitoh static int
2503 1.333 msaitoh ixgbe_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
2504 1.98 msaitoh {
2505 1.98 msaitoh u16 index, bit;
2506 1.193 msaitoh int error;
2507 1.1 dyoung
2508 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2509 1.193 msaitoh return EINVAL;
2510 1.1 dyoung
2511 1.333 msaitoh IXGBE_CORE_LOCK(sc);
2512 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2513 1.98 msaitoh bit = vtag & 0x1F;
2514 1.333 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit);
2515 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, false,
2516 1.193 msaitoh true);
2517 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2518 1.193 msaitoh if (error != 0)
2519 1.193 msaitoh error = EACCES;
2520 1.193 msaitoh
2521 1.193 msaitoh return error;
2522 1.99 msaitoh } /* ixgbe_unregister_vlan */
2523 1.98 msaitoh
2524 1.98 msaitoh static void
2525 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *sc)
2526 1.98 msaitoh {
2527 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2528 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2529 1.98 msaitoh struct rx_ring *rxr;
2530 1.200 msaitoh u32 ctrl;
2531 1.186 msaitoh int i;
2532 1.177 msaitoh bool hwtagging;
2533 1.98 msaitoh
2534 1.178 msaitoh /* Enable HW tagging only if any vlan is attached */
2535 1.177 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2536 1.178 msaitoh && VLAN_ATTACHED(ec);
2537 1.1 dyoung
2538 1.98 msaitoh /* Setup the queues for vlans */
2539 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
2540 1.333 msaitoh rxr = &sc->rx_rings[i];
2541 1.178 msaitoh /*
2542 1.178 msaitoh * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2543 1.178 msaitoh */
2544 1.177 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
2545 1.177 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2546 1.177 msaitoh if (hwtagging)
2547 1.115 msaitoh ctrl |= IXGBE_RXDCTL_VME;
2548 1.177 msaitoh else
2549 1.177 msaitoh ctrl &= ~IXGBE_RXDCTL_VME;
2550 1.177 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2551 1.98 msaitoh }
2552 1.177 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2553 1.1 dyoung }
2554 1.1 dyoung
2555 1.200 msaitoh /* VLAN hw tagging for 82598 */
2556 1.200 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
2557 1.200 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2558 1.200 msaitoh if (hwtagging)
2559 1.200 msaitoh ctrl |= IXGBE_VLNCTRL_VME;
2560 1.200 msaitoh else
2561 1.200 msaitoh ctrl &= ~IXGBE_VLNCTRL_VME;
2562 1.200 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2563 1.200 msaitoh }
2564 1.200 msaitoh } /* ixgbe_setup_vlan_hw_tagging */
2565 1.200 msaitoh
2566 1.200 msaitoh static void
2567 1.333 msaitoh ixgbe_setup_vlan_hw_support(struct ixgbe_softc *sc)
2568 1.200 msaitoh {
2569 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2570 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2571 1.200 msaitoh int i;
2572 1.200 msaitoh u32 ctrl;
2573 1.200 msaitoh struct vlanid_list *vlanidp;
2574 1.200 msaitoh
2575 1.200 msaitoh /*
2576 1.294 skrll * This function is called from both if_init and ifflags_cb()
2577 1.200 msaitoh * on NetBSD.
2578 1.200 msaitoh */
2579 1.200 msaitoh
2580 1.200 msaitoh /*
2581 1.200 msaitoh * Part 1:
2582 1.200 msaitoh * Setup VLAN HW tagging
2583 1.200 msaitoh */
2584 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2585 1.200 msaitoh
2586 1.200 msaitoh /*
2587 1.200 msaitoh * Part 2:
2588 1.200 msaitoh * Setup VLAN HW filter
2589 1.200 msaitoh */
2590 1.193 msaitoh /* Cleanup shadow_vfta */
2591 1.193 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2592 1.333 msaitoh sc->shadow_vfta[i] = 0;
2593 1.193 msaitoh /* Generate shadow_vfta from ec_vids */
2594 1.201 msaitoh ETHER_LOCK(ec);
2595 1.193 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2596 1.193 msaitoh uint32_t idx;
2597 1.193 msaitoh
2598 1.193 msaitoh idx = vlanidp->vid / 32;
2599 1.193 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE);
2600 1.333 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2601 1.193 msaitoh }
2602 1.201 msaitoh ETHER_UNLOCK(ec);
2603 1.99 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2604 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), sc->shadow_vfta[i]);
2605 1.22 msaitoh
2606 1.98 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2607 1.98 msaitoh /* Enable the Filter Table if enabled */
2608 1.177 msaitoh if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2609 1.98 msaitoh ctrl |= IXGBE_VLNCTRL_VFE;
2610 1.177 msaitoh else
2611 1.177 msaitoh ctrl &= ~IXGBE_VLNCTRL_VFE;
2612 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2613 1.99 msaitoh } /* ixgbe_setup_vlan_hw_support */
2614 1.1 dyoung
2615 1.99 msaitoh /************************************************************************
2616 1.99 msaitoh * ixgbe_get_slot_info
2617 1.99 msaitoh *
2618 1.99 msaitoh * Get the width and transaction speed of
2619 1.99 msaitoh * the slot this adapter is plugged into.
2620 1.99 msaitoh ************************************************************************/
2621 1.98 msaitoh static void
2622 1.333 msaitoh ixgbe_get_slot_info(struct ixgbe_softc *sc)
2623 1.98 msaitoh {
2624 1.333 msaitoh device_t dev = sc->dev;
2625 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2626 1.186 msaitoh u32 offset;
2627 1.98 msaitoh u16 link;
2628 1.186 msaitoh int bus_info_valid = TRUE;
2629 1.99 msaitoh
2630 1.99 msaitoh /* Some devices are behind an internal bridge */
2631 1.99 msaitoh switch (hw->device_id) {
2632 1.99 msaitoh case IXGBE_DEV_ID_82599_SFP_SF_QP:
2633 1.99 msaitoh case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2634 1.99 msaitoh goto get_parent_info;
2635 1.99 msaitoh default:
2636 1.99 msaitoh break;
2637 1.99 msaitoh }
2638 1.1 dyoung
2639 1.99 msaitoh ixgbe_get_bus_info(hw);
2640 1.99 msaitoh
2641 1.99 msaitoh /*
2642 1.99 msaitoh * Some devices don't use PCI-E, but there is no need
2643 1.99 msaitoh * to display "Unknown" for bus speed and width.
2644 1.99 msaitoh */
2645 1.99 msaitoh switch (hw->mac.type) {
2646 1.99 msaitoh case ixgbe_mac_X550EM_x:
2647 1.99 msaitoh case ixgbe_mac_X550EM_a:
2648 1.99 msaitoh return;
2649 1.99 msaitoh default:
2650 1.99 msaitoh goto display;
2651 1.1 dyoung }
2652 1.1 dyoung
2653 1.99 msaitoh get_parent_info:
2654 1.98 msaitoh /*
2655 1.99 msaitoh * For the Quad port adapter we need to parse back
2656 1.99 msaitoh * up the PCI tree to find the speed of the expansion
2657 1.99 msaitoh * slot into which this adapter is plugged. A bit more work.
2658 1.99 msaitoh */
2659 1.98 msaitoh dev = device_parent(device_parent(dev));
2660 1.99 msaitoh #if 0
2661 1.98 msaitoh #ifdef IXGBE_DEBUG
2662 1.99 msaitoh device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2663 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2664 1.98 msaitoh #endif
2665 1.98 msaitoh dev = device_parent(device_parent(dev));
2666 1.98 msaitoh #ifdef IXGBE_DEBUG
2667 1.99 msaitoh device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2668 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2669 1.99 msaitoh #endif
2670 1.1 dyoung #endif
2671 1.98 msaitoh /* Now get the PCI Express Capabilities offset */
2672 1.333 msaitoh if (pci_get_capability(sc->osdep.pc, sc->osdep.tag,
2673 1.99 msaitoh PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2674 1.99 msaitoh /*
2675 1.99 msaitoh * Hmm...can't get PCI-Express capabilities.
2676 1.99 msaitoh * Falling back to default method.
2677 1.99 msaitoh */
2678 1.99 msaitoh bus_info_valid = FALSE;
2679 1.99 msaitoh ixgbe_get_bus_info(hw);
2680 1.99 msaitoh goto display;
2681 1.99 msaitoh }
2682 1.98 msaitoh /* ...and read the Link Status Register */
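	/*
	 * PCIE_LCSR combines Link Control (low 16 bits) and Link Status
	 * (high 16 bits), hence the shift to extract the status half.
	 */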
2683 1.333 msaitoh link = pci_conf_read(sc->osdep.pc, sc->osdep.tag,
2684 1.120 msaitoh offset + PCIE_LCSR) >> 16;
2685 1.120 msaitoh ixgbe_set_pci_config_data_generic(hw, link);
2686 1.52 msaitoh
2687 1.98 msaitoh display:
2688 1.99 msaitoh device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2689 1.186 msaitoh ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2690 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2691 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2692 1.99 msaitoh "Unknown"),
2693 1.99 msaitoh ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2694 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2695 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2696 1.99 msaitoh "Unknown"));
2697 1.99 msaitoh
2698 1.99 msaitoh if (bus_info_valid) {
2699 1.99 msaitoh if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2700 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2701 1.99 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500))) {
2702 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2703 1.99 msaitoh " for this card\n is not sufficient for"
2704 1.99 msaitoh " optimal performance.\n");
2705 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2706 1.99 msaitoh "PCIE, or x4 PCIE Gen2 slot is required.\n");
2707 1.99 msaitoh }
2708 1.99 msaitoh if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2709 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2710 1.99 msaitoh (hw->bus.speed < ixgbe_bus_speed_8000))) {
2711 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2712 1.99 msaitoh " for this card\n is not sufficient for"
2713 1.99 msaitoh " optimal performance.\n");
2714 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2715 1.99 msaitoh "PCIE Gen3 slot is required.\n");
2716 1.99 msaitoh }
2717 1.99 msaitoh } else
2718 1.319 msaitoh device_printf(dev,
2719 1.319 msaitoh "Unable to determine slot speed/width. The speed/width "
2720 1.319 msaitoh "reported are that of the internal switch.\n");
2721 1.45 msaitoh
2722 1.45 msaitoh return;
2723 1.99 msaitoh } /* ixgbe_get_slot_info */
2724 1.1 dyoung
2725 1.99 msaitoh /************************************************************************
2726 1.321 msaitoh * ixgbe_enable_queue - Queue Interrupt Enabler
2727 1.99 msaitoh ************************************************************************/
2728 1.1 dyoung static inline void
2729 1.333 msaitoh ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
2730 1.1 dyoung {
2731 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2732 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2733 1.197 msaitoh u64 queue = 1ULL << vector;
2734 1.186 msaitoh u32 mask;
2735 1.1 dyoung
2736 1.139 knakahar mutex_enter(&que->dc_mtx);
2737 1.139 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0)
2738 1.127 knakahar goto out;
2739 1.127 knakahar
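	/*
	 * Example: for vector 35, queue = 1ULL << 35, so the low 32-bit
	 * mask is zero and only bit 3 of EIMS_EX(1) is written; MACs
	 * other than 82598 split the 64-bit queue mask across
	 * EIMS_EX(0) and EIMS_EX(1) as below.
	 */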
2740 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2741 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2742 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2743 1.1 dyoung } else {
2744 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2745 1.98 msaitoh if (mask)
2746 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2747 1.98 msaitoh mask = (queue >> 32);
2748 1.98 msaitoh if (mask)
2749 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2750 1.1 dyoung }
2751 1.127 knakahar out:
2752 1.139 knakahar mutex_exit(&que->dc_mtx);
2753 1.99 msaitoh } /* ixgbe_enable_queue */
2754 1.1 dyoung
2755 1.99 msaitoh /************************************************************************
2756 1.139 knakahar * ixgbe_disable_queue_internal
2757 1.99 msaitoh ************************************************************************/
2758 1.82 msaitoh static inline void
2759 1.333 msaitoh ixgbe_disable_queue_internal(struct ixgbe_softc *sc, u32 vector, bool nestok)
2760 1.1 dyoung {
2761 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2762 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2763 1.197 msaitoh u64 queue = 1ULL << vector;
2764 1.186 msaitoh u32 mask;
2765 1.1 dyoung
2766 1.139 knakahar mutex_enter(&que->dc_mtx);
2767 1.139 knakahar
2768 1.139 knakahar if (que->disabled_count > 0) {
2769 1.139 knakahar if (nestok)
2770 1.139 knakahar que->disabled_count++;
2771 1.139 knakahar goto out;
2772 1.139 knakahar }
2773 1.139 knakahar que->disabled_count++;
2774 1.127 knakahar
2775 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2776 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2777 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2778 1.1 dyoung } else {
2779 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2780 1.98 msaitoh if (mask)
2781 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2782 1.98 msaitoh mask = (queue >> 32);
2783 1.98 msaitoh if (mask)
2784 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2785 1.1 dyoung }
2786 1.127 knakahar out:
2787 1.139 knakahar mutex_exit(&que->dc_mtx);
2788 1.139 knakahar } /* ixgbe_disable_queue_internal */
2789 1.139 knakahar
2790 1.139 knakahar /************************************************************************
2791 1.139 knakahar * ixgbe_disable_queue
2792 1.139 knakahar ************************************************************************/
2793 1.139 knakahar static inline void
2794 1.333 msaitoh ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
2795 1.139 knakahar {
2796 1.139 knakahar
2797 1.333 msaitoh ixgbe_disable_queue_internal(sc, vector, true);
2798 1.99 msaitoh } /* ixgbe_disable_queue */
2799 1.1 dyoung
2800 1.99 msaitoh /************************************************************************
2801 1.133 knakahar * ixgbe_sched_handle_que - schedule deferred packet processing
2802 1.133 knakahar ************************************************************************/
2803 1.133 knakahar static inline void
2804 1.333 msaitoh ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
2805 1.133 knakahar {
2806 1.133 knakahar
2807 1.185 msaitoh if (que->txrx_use_workqueue) {
2808 1.133 knakahar /*
2809 1.333 msaitoh 		 * sc->que_wq is bound to each CPU instead of each
2810 1.133 knakahar 		 * NIC queue to reduce the number of workqueue
2811 1.133 knakahar 		 * kthreads. Because this function must take interrupt
2812 1.133 knakahar 		 * affinity into account, the workqueue kthread must be
2813 1.133 knakahar 		 * WQ_PERCPU. If a WQ_PERCPU workqueue kthread were
2814 1.133 knakahar 		 * created for each NIC queue, the number of created
2815 1.133 knakahar 		 * kthreads would be (number of used NIC queues) *
2816 1.133 knakahar 		 * (number of CPUs), i.e. (number of CPUs) ^ 2 most often.
2817 1.133 knakahar 		 *
2818 1.133 knakahar 		 * Repeated interrupts from the same NIC queue are
2819 1.133 knakahar 		 * prevented by masking the queue's interrupt, and
2820 1.133 knakahar 		 * different NIC queues' interrupts use different
2821 1.133 knakahar 		 * struct work (que->wq_cookie), so an "enqueued" flag
2822 1.133 knakahar 		 * to avoid calling workqueue_enqueue() twice is not required.
2823 1.133 knakahar */
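		/*
		 * For example, with 8 NIC queues on an 8-CPU machine,
		 * per-queue WQ_PERCPU workqueues would create 8 * 8 = 64
		 * kthreads, whereas the single shared sc->que_wq needs
		 * only 8 (one per CPU).
		 */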
2824 1.333 msaitoh workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
2825 1.319 msaitoh } else
2826 1.133 knakahar softint_schedule(que->que_si);
2827 1.133 knakahar }
2828 1.133 knakahar
2829 1.133 knakahar /************************************************************************
2830 1.99 msaitoh * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2831 1.99 msaitoh ************************************************************************/
2832 1.34 msaitoh static int
2833 1.1 dyoung ixgbe_msix_que(void *arg)
2834 1.1 dyoung {
2835 1.1 dyoung struct ix_queue *que = arg;
2836 1.339 msaitoh struct ixgbe_softc *sc = que->sc;
2837 1.333 msaitoh struct ifnet *ifp = sc->ifp;
2838 1.1 dyoung struct tx_ring *txr = que->txr;
2839 1.1 dyoung struct rx_ring *rxr = que->rxr;
2840 1.1 dyoung u32 newitr = 0;
2841 1.1 dyoung
2842 1.33 msaitoh /* Protect against spurious interrupts */
2843 1.33 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
2844 1.34 msaitoh return 0;
2845 1.33 msaitoh
2846 1.333 msaitoh ixgbe_disable_queue(sc, que->msix);
2847 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
2848 1.1 dyoung
2849 1.147 knakahar /*
2850 1.147 knakahar 	 * Don't change "que->txrx_use_workqueue" after this point, to avoid
2851 1.147 knakahar 	 * flip-flopping between softint and workqueue mode in one deferred processing.
2852 1.147 knakahar */
2853 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
2854 1.147 knakahar
2855 1.1 dyoung IXGBE_TX_LOCK(txr);
2856 1.33 msaitoh ixgbe_txeof(txr);
2857 1.1 dyoung IXGBE_TX_UNLOCK(txr);
2858 1.1 dyoung
2859 1.1 dyoung /* Do AIM now? */
2860 1.1 dyoung
2861 1.333 msaitoh if (sc->enable_aim == false)
2862 1.1 dyoung goto no_calc;
2863 1.1 dyoung /*
2864 1.99 msaitoh * Do Adaptive Interrupt Moderation:
2865 1.99 msaitoh * - Write out last calculated setting
2866 1.99 msaitoh * - Calculate based on average size over
2867 1.99 msaitoh * the last interval.
2868 1.99 msaitoh */
2869 1.99 msaitoh if (que->eitr_setting)
2870 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
2871 1.99 msaitoh
2872 1.98 msaitoh que->eitr_setting = 0;
2873 1.1 dyoung
2874 1.98 msaitoh /* Idle, do nothing */
2875 1.186 msaitoh if ((txr->bytes == 0) && (rxr->bytes == 0))
2876 1.186 msaitoh goto no_calc;
2877 1.185 msaitoh
2878 1.1 dyoung if ((txr->bytes) && (txr->packets))
2879 1.98 msaitoh newitr = txr->bytes/txr->packets;
2880 1.1 dyoung if ((rxr->bytes) && (rxr->packets))
2881 1.165 riastrad newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2882 1.1 dyoung newitr += 24; /* account for hardware frame, crc */
2883 1.1 dyoung
2884 1.1 dyoung /* set an upper boundary */
2885 1.165 riastrad newitr = uimin(newitr, 3000);
2886 1.1 dyoung
2887 1.1 dyoung /* Be nice to the mid range */
2888 1.1 dyoung if ((newitr > 300) && (newitr < 1200))
2889 1.1 dyoung newitr = (newitr / 3);
2890 1.1 dyoung else
2891 1.1 dyoung newitr = (newitr / 2);
2892 1.1 dyoung
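	/*
	 * Worked example: full-sized frames averaging 1500 bytes/packet
	 * give newitr = 1500 + 24 = 1524, which is above the mid range,
	 * so the stored setting becomes 1524 / 2 = 762 (subject to the
	 * RSC minimum adjustment below).
	 */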
2893 1.124 msaitoh /*
2894 1.124 msaitoh * When RSC is used, ITR interval must be larger than RSC_DELAY.
2895 1.124 msaitoh * Currently, we use 2us for RSC_DELAY. The minimum value is always
2896 1.124 msaitoh * greater than 2us on 100M (and 10M?(not documented)), but it's not
2897 1.124 msaitoh * on 1G and higher.
2898 1.124 msaitoh */
2899 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
2900 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL))
2901 1.124 msaitoh if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2902 1.124 msaitoh newitr = IXGBE_MIN_RSC_EITR_10G1G;
2903 1.124 msaitoh
2904 1.186 msaitoh /* save for next interrupt */
2905 1.186 msaitoh que->eitr_setting = newitr;
2906 1.1 dyoung
2907 1.98 msaitoh /* Reset state */
2908 1.98 msaitoh txr->bytes = 0;
2909 1.98 msaitoh txr->packets = 0;
2910 1.98 msaitoh rxr->bytes = 0;
2911 1.98 msaitoh rxr->packets = 0;
2912 1.1 dyoung
2913 1.1 dyoung no_calc:
2914 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
2915 1.99 msaitoh
2916 1.34 msaitoh return 1;
2917 1.99 msaitoh } /* ixgbe_msix_que */
2918 1.1 dyoung
2919 1.99 msaitoh /************************************************************************
2920 1.99 msaitoh * ixgbe_media_status - Media Ioctl callback
2921 1.98 msaitoh *
2922 1.99 msaitoh * Called whenever the user queries the status of
2923 1.99 msaitoh * the interface using ifconfig.
2924 1.99 msaitoh ************************************************************************/
2925 1.98 msaitoh static void
2926 1.98 msaitoh ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2927 1.1 dyoung {
2928 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2929 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2930 1.98 msaitoh int layer;
2931 1.1 dyoung
2932 1.98 msaitoh INIT_DEBUGOUT("ixgbe_media_status: begin");
2933 1.333 msaitoh ixgbe_update_link_status(sc);
2934 1.1 dyoung
2935 1.1 dyoung ifmr->ifm_status = IFM_AVALID;
2936 1.1 dyoung ifmr->ifm_active = IFM_ETHER;
2937 1.1 dyoung
2938 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
2939 1.68 msaitoh ifmr->ifm_active |= IFM_NONE;
2940 1.1 dyoung return;
2941 1.1 dyoung }
2942 1.1 dyoung
2943 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE;
2944 1.333 msaitoh layer = sc->phy_layer;
2945 1.1 dyoung
2946 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2947 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2948 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2949 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2950 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2951 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2952 1.333 msaitoh switch (sc->link_speed) {
2953 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2954 1.43 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2955 1.43 msaitoh break;
2956 1.103 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
2957 1.103 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2958 1.103 msaitoh break;
2959 1.103 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
2960 1.103 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2961 1.103 msaitoh break;
2962 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2963 1.33 msaitoh ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2964 1.43 msaitoh break;
2965 1.43 msaitoh case IXGBE_LINK_SPEED_100_FULL:
2966 1.24 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2967 1.43 msaitoh break;
2968 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
2969 1.99 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2970 1.99 msaitoh break;
2971 1.43 msaitoh }
2972 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2973 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2974 1.333 msaitoh switch (sc->link_speed) {
2975 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2976 1.43 msaitoh ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2977 1.43 msaitoh break;
2978 1.43 msaitoh }
2979 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2980 1.333 msaitoh switch (sc->link_speed) {
2981 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2982 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2983 1.43 msaitoh break;
2984 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2985 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2986 1.43 msaitoh break;
2987 1.43 msaitoh }
2988 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2989 1.333 msaitoh switch (sc->link_speed) {
2990 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2991 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2992 1.43 msaitoh break;
2993 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2994 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2995 1.43 msaitoh break;
2996 1.43 msaitoh }
2997 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2998 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2999 1.333 msaitoh switch (sc->link_speed) {
3000 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3001 1.43 msaitoh ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
3002 1.43 msaitoh break;
3003 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3004 1.28 msaitoh ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
3005 1.43 msaitoh break;
3006 1.43 msaitoh }
3007 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3008 1.333 msaitoh switch (sc->link_speed) {
3009 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3010 1.43 msaitoh ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
3011 1.43 msaitoh break;
3012 1.43 msaitoh }
3013 1.43 msaitoh /*
3014 1.99 msaitoh * XXX: These need to use the proper media types once
3015 1.99 msaitoh * they're added.
3016 1.99 msaitoh */
3017 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3018 1.333 msaitoh switch (sc->link_speed) {
3019 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3020 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
3021 1.48 msaitoh break;
3022 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3023 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3024 1.48 msaitoh break;
3025 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3026 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3027 1.48 msaitoh break;
3028 1.48 msaitoh }
3029 1.99 msaitoh else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
3030 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
3031 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3032 1.333 msaitoh switch (sc->link_speed) {
3033 1.48 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3034 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
3035 1.48 msaitoh break;
3036 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3037 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3038 1.48 msaitoh break;
3039 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3040 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3041 1.48 msaitoh break;
3042 1.48 msaitoh }
3043 1.98 msaitoh
3044 1.43 msaitoh /* If nothing is recognized... */
3045 1.43 msaitoh #if 0
3046 1.43 msaitoh if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
3047 1.43 msaitoh ifmr->ifm_active |= IFM_UNKNOWN;
3048 1.43 msaitoh #endif
3049 1.98 msaitoh
3050 1.104 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
3051 1.104 msaitoh
3052 1.44 msaitoh /* Display current flow control setting used on link */
3053 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
3054 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3055 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3056 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
3057 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3058 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3059 1.1 dyoung
3060 1.1 dyoung return;
3061 1.99 msaitoh } /* ixgbe_media_status */
3062 1.1 dyoung
3063 1.99 msaitoh /************************************************************************
3064 1.99 msaitoh * ixgbe_media_change - Media Ioctl callback
3065 1.1 dyoung *
3066 1.99 msaitoh * Called when the user changes speed/duplex using
3067 1.99 msaitoh * media/mediopt option with ifconfig.
3068 1.99 msaitoh ************************************************************************/
3069 1.1 dyoung static int
3070 1.98 msaitoh ixgbe_media_change(struct ifnet *ifp)
3071 1.1 dyoung {
3072 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3073 1.333 msaitoh struct ifmedia *ifm = &sc->media;
3074 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3075 1.43 msaitoh ixgbe_link_speed speed = 0;
3076 1.94 msaitoh ixgbe_link_speed link_caps = 0;
3077 1.94 msaitoh bool negotiate = false;
3078 1.94 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
3079 1.1 dyoung
3080 1.1 dyoung INIT_DEBUGOUT("ixgbe_media_change: begin");
3081 1.1 dyoung
3082 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3083 1.1 dyoung return (EINVAL);
3084 1.1 dyoung
3085 1.44 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
3086 1.144 msaitoh return (EPERM);
3087 1.44 msaitoh
3088 1.43 msaitoh /*
3089 1.99 msaitoh * We don't actually need to check against the supported
3090 1.99 msaitoh * media types of the adapter; ifmedia will take care of
3091 1.99 msaitoh * that for us.
3092 1.99 msaitoh */
3093 1.43 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) {
3094 1.98 msaitoh case IFM_AUTO:
3095 1.98 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3096 1.98 msaitoh &negotiate);
3097 1.98 msaitoh if (err != IXGBE_SUCCESS) {
3098 1.333 msaitoh device_printf(sc->dev, "Unable to determine "
3099 1.98 msaitoh "supported advertise speeds\n");
3100 1.98 msaitoh return (ENODEV);
3101 1.98 msaitoh }
3102 1.98 msaitoh speed |= link_caps;
3103 1.98 msaitoh break;
3104 1.98 msaitoh case IFM_10G_T:
3105 1.98 msaitoh case IFM_10G_LRM:
3106 1.98 msaitoh case IFM_10G_LR:
3107 1.98 msaitoh case IFM_10G_TWINAX:
3108 1.181 msaitoh case IFM_10G_SR:
3109 1.181 msaitoh case IFM_10G_CX4:
3110 1.98 msaitoh case IFM_10G_KR:
3111 1.98 msaitoh case IFM_10G_KX4:
3112 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
3113 1.98 msaitoh break;
3114 1.103 msaitoh case IFM_5000_T:
3115 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
3116 1.103 msaitoh break;
3117 1.103 msaitoh case IFM_2500_T:
3118 1.99 msaitoh case IFM_2500_KX:
3119 1.99 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3120 1.99 msaitoh break;
3121 1.98 msaitoh case IFM_1000_T:
3122 1.98 msaitoh case IFM_1000_LX:
3123 1.98 msaitoh case IFM_1000_SX:
3124 1.98 msaitoh case IFM_1000_KX:
3125 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
3126 1.98 msaitoh break;
3127 1.98 msaitoh case IFM_100_TX:
3128 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
3129 1.98 msaitoh break;
3130 1.99 msaitoh case IFM_10_T:
3131 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
3132 1.99 msaitoh break;
3133 1.140 msaitoh case IFM_NONE:
3134 1.140 msaitoh break;
3135 1.98 msaitoh default:
3136 1.98 msaitoh goto invalid;
3137 1.48 msaitoh }
3138 1.43 msaitoh
3139 1.43 msaitoh hw->mac.autotry_restart = TRUE;
3140 1.43 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
3141 1.333 msaitoh sc->advertise = 0;
3142 1.109 msaitoh if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3143 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3144 1.333 msaitoh sc->advertise |= 1 << 2;
3145 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3146 1.333 msaitoh sc->advertise |= 1 << 1;
3147 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3148 1.333 msaitoh sc->advertise |= 1 << 0;
3149 1.99 msaitoh if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3150 1.333 msaitoh sc->advertise |= 1 << 3;
3151 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3152 1.333 msaitoh sc->advertise |= 1 << 4;
3153 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3154 1.333 msaitoh sc->advertise |= 1 << 5;
3155 1.51 msaitoh }
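	/*
	 * For non-autoselect media, sc->advertise now encodes the
	 * selected speeds as bits:
	 * 0 = 100M, 1 = 1G, 2 = 10G, 3 = 10M, 4 = 2.5G, 5 = 5G.
	 */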
3156 1.1 dyoung
3157 1.1 dyoung return (0);
3158 1.43 msaitoh
3159 1.43 msaitoh invalid:
3160 1.333 msaitoh device_printf(sc->dev, "Invalid media type!\n");
3161 1.98 msaitoh
3162 1.43 msaitoh return (EINVAL);
3163 1.99 msaitoh } /* ixgbe_media_change */
3164 1.1 dyoung
3165 1.99 msaitoh /************************************************************************
3166 1.320 msaitoh * ixgbe_msix_admin - Link status change ISR (MSI-X)
3167 1.99 msaitoh ************************************************************************/
3168 1.98 msaitoh static int
3169 1.233 msaitoh ixgbe_msix_admin(void *arg)
3170 1.98 msaitoh {
3171 1.333 msaitoh struct ixgbe_softc *sc = arg;
3172 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3173 1.277 msaitoh u32 eicr;
3174 1.273 msaitoh u32 eims_orig;
3175 1.273 msaitoh u32 eims_disable = 0;
3176 1.98 msaitoh
3177 1.333 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1);
3178 1.98 msaitoh
3179 1.273 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3180 1.273 msaitoh /* Pause other interrupts */
3181 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3182 1.273 msaitoh
3183 1.125 knakahar /*
3184 1.273 msaitoh * First get the cause.
3185 1.273 msaitoh *
3186 1.125 knakahar 	 * The specifications of 82598, 82599, X540 and X550 say the EICS
3187 1.125 knakahar 	 * register is write only. However, Linux reads EICS instead of EICR
3188 1.273 msaitoh 	 * to get the interrupt cause as a workaround for silicon errata.
3189 1.273 msaitoh 	 * At least, reading EICR clears the lower 16 bits of EIMS on 82598.
3190 1.125 knakahar */
3191 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3192 1.98 msaitoh /* Be sure the queue bits are not cleared */
3193 1.99 msaitoh eicr &= ~IXGBE_EICR_RTX_QUEUE;
3194 1.265 msaitoh /* Clear all OTHER interrupts with write */
3195 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3196 1.1 dyoung
3197 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
3198 1.277 msaitoh
3199 1.277 msaitoh /* Re-enable some OTHER interrupts */
3200 1.277 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3201 1.277 msaitoh
3202 1.277 msaitoh return 1;
3203 1.277 msaitoh } /* ixgbe_msix_admin */
3204 1.277 msaitoh
3205 1.277 msaitoh static void
3206 1.333 msaitoh ixgbe_intr_admin_common(struct ixgbe_softc *sc, u32 eicr, u32 *eims_disable)
3207 1.277 msaitoh {
3208 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3209 1.277 msaitoh u32 task_requests = 0;
3210 1.277 msaitoh s32 retval;
3211 1.277 msaitoh
3212 1.266 msaitoh /* Link status change */
3213 1.266 msaitoh if (eicr & IXGBE_EICR_LSC) {
3214 1.266 msaitoh task_requests |= IXGBE_REQUEST_TASK_LSC;
3215 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3216 1.266 msaitoh }
3217 1.266 msaitoh
3218 1.204 msaitoh if (ixgbe_is_sfp(hw)) {
3219 1.310 msaitoh u32 eicr_mask;
3220 1.310 msaitoh
3221 1.204 msaitoh /* Pluggable optics-related interrupt */
3222 1.204 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
3223 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3224 1.204 msaitoh else
3225 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3226 1.204 msaitoh
3227 1.204 msaitoh /*
3228 1.204 msaitoh * An interrupt might not arrive when a module is inserted.
3229 1.204 msaitoh 		 * When a link status change interrupt occurs while the driver
3230 1.204 msaitoh 		 * still regards the SFP as unplugged, issue the module softint
3231 1.204 msaitoh 		 * and then the LSC interrupt.
3232 1.204 msaitoh */
3233 1.204 msaitoh if ((eicr & eicr_mask)
3234 1.204 msaitoh || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3235 1.204 msaitoh && (eicr & IXGBE_EICR_LSC))) {
3236 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD;
3237 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3238 1.204 msaitoh }
3239 1.204 msaitoh
3240 1.204 msaitoh if ((hw->mac.type == ixgbe_mac_82599EB) &&
3241 1.204 msaitoh (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3242 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF;
3243 1.277 msaitoh *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3244 1.204 msaitoh }
3245 1.204 msaitoh }
3246 1.204 msaitoh
3247 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3248 1.311 msaitoh #ifdef IXGBE_FDIR
3249 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3250 1.99 msaitoh (eicr & IXGBE_EICR_FLOW_DIR)) {
3251 1.333 msaitoh if (!atomic_cas_uint(&sc->fdir_reinit, 0, 1)) {
3252 1.275 msaitoh task_requests |= IXGBE_REQUEST_TASK_FDIR;
3253 1.275 msaitoh /* Disable the interrupt */
3254 1.277 msaitoh *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3255 1.275 msaitoh }
3256 1.99 msaitoh }
3257 1.311 msaitoh #endif
3258 1.99 msaitoh
3259 1.99 msaitoh if (eicr & IXGBE_EICR_ECC) {
3260 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3261 1.312 msaitoh &ixgbe_errlog_intrvl))
3262 1.333 msaitoh device_printf(sc->dev,
3263 1.312 msaitoh "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3264 1.98 msaitoh }
3265 1.1 dyoung
3266 1.98 msaitoh /* Check for over temp condition */
3267 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3268 1.333 msaitoh switch (sc->hw.mac.type) {
3269 1.99 msaitoh case ixgbe_mac_X550EM_a:
3270 1.99 msaitoh if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3271 1.99 msaitoh break;
3272 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3273 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3274 1.99 msaitoh break;
3275 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3276 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3277 1.333 msaitoh device_printf(sc->dev,
3278 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3279 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3280 1.333 msaitoh device_printf(sc->dev,
3281 1.312 msaitoh "System shutdown required!\n");
3282 1.312 msaitoh }
3283 1.99 msaitoh break;
3284 1.99 msaitoh default:
3285 1.99 msaitoh if (!(eicr & IXGBE_EICR_TS))
3286 1.99 msaitoh break;
3287 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3288 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3289 1.99 msaitoh break;
3290 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3291 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3292 1.333 msaitoh device_printf(sc->dev,
3293 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3294 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3295 1.333 msaitoh device_printf(sc->dev,
3296 1.312 msaitoh "System shutdown required!\n");
3297 1.312 msaitoh }
3298 1.99 msaitoh break;
3299 1.99 msaitoh }
3300 1.1 dyoung }
3301 1.99 msaitoh
3302 1.99 msaitoh /* Check for VF message */
3303 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3304 1.233 msaitoh (eicr & IXGBE_EICR_MAILBOX)) {
3305 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MBX;
3306 1.277 msaitoh *eims_disable |= IXGBE_EIMS_MAILBOX;
3307 1.233 msaitoh }
3308 1.1 dyoung }
3309 1.1 dyoung
3310 1.98 msaitoh /* Check for fan failure */
3311 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3312 1.333 msaitoh ixgbe_check_fan_failure(sc, eicr, true);
3313 1.1 dyoung
3314 1.98 msaitoh /* External PHY interrupt */
3315 1.99 msaitoh if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3316 1.99 msaitoh (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3317 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_PHY;
3318 1.277 msaitoh *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3319 1.233 msaitoh }
3320 1.233 msaitoh
3321 1.233 msaitoh if (task_requests != 0) {
3322 1.333 msaitoh mutex_enter(&sc->admin_mtx);
3323 1.333 msaitoh sc->task_requests |= task_requests;
3324 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
3325 1.333 msaitoh mutex_exit(&sc->admin_mtx);
3326 1.186 msaitoh }
3327 1.277 msaitoh }
3328 1.1 dyoung
3329 1.124 msaitoh static void
3330 1.333 msaitoh ixgbe_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
3331 1.124 msaitoh {
3332 1.185 msaitoh
3333 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
3334 1.186 msaitoh itr |= itr << 16;
3335 1.186 msaitoh else
3336 1.186 msaitoh itr |= IXGBE_EITR_CNT_WDIS;
3337 1.124 msaitoh
3338 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(index), itr);
3339 1.124 msaitoh }
3340 1.124 msaitoh
3341 1.124 msaitoh
3342 1.99 msaitoh /************************************************************************
3343 1.99 msaitoh * ixgbe_sysctl_interrupt_rate_handler
3344 1.99 msaitoh ************************************************************************/
3345 1.98 msaitoh static int
3346 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3347 1.1 dyoung {
3348 1.98 msaitoh struct sysctlnode node = *rnode;
3349 1.99 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3350 1.339 msaitoh struct ixgbe_softc *sc;
3351 1.98 msaitoh uint32_t reg, usec, rate;
3352 1.98 msaitoh int error;
3353 1.45 msaitoh
3354 1.98 msaitoh if (que == NULL)
3355 1.98 msaitoh return 0;
3356 1.169 msaitoh
3357 1.333 msaitoh sc = que->sc;
3358 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
3359 1.169 msaitoh return (EPERM);
3360 1.169 msaitoh
3361 1.333 msaitoh reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
3362 1.98 msaitoh usec = ((reg & 0x0FF8) >> 3);
3363 1.98 msaitoh if (usec > 0)
3364 1.98 msaitoh rate = 500000 / usec;
3365 1.98 msaitoh else
3366 1.98 msaitoh rate = 0;
3367 1.98 msaitoh node.sysctl_data = &rate;
3368 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
3369 1.98 msaitoh if (error || newp == NULL)
3370 1.98 msaitoh return error;
3371 1.98 msaitoh reg &= ~0xfff; /* default, no limitation */
3372 1.98 msaitoh if (rate > 0 && rate < 500000) {
3373 1.98 msaitoh if (rate < 1000)
3374 1.98 msaitoh rate = 1000;
3375 1.228 msaitoh reg |= ((4000000 / rate) & 0xff8);
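		/*
		 * E.g. a requested rate of 10000 interrupts/s stores
		 * 4000000 / 10000 = 400 (0x190) in EITR; reading it back
		 * above gives usec = 0x190 >> 3 = 50 and
		 * rate = 500000 / 50 = 10000 again.
		 */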
3376 1.124 msaitoh /*
3377 1.124 msaitoh * When RSC is used, ITR interval must be larger than
3378 1.124 msaitoh * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3379 1.124 msaitoh * The minimum value is always greater than 2us on 100M
3380 1.124 msaitoh * (and 10M?(not documented)), but it's not on 1G and higher.
3381 1.124 msaitoh */
3382 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
3383 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3384 1.333 msaitoh if ((sc->num_queues > 1)
3385 1.124 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3386 1.124 msaitoh return EINVAL;
3387 1.124 msaitoh }
3388 1.343 msaitoh sc->max_interrupt_rate = rate;
3389 1.124 msaitoh } else
3390 1.343 msaitoh sc->max_interrupt_rate = 0;
3391 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, reg);
3392 1.99 msaitoh
3393 1.99 msaitoh return (0);
3394 1.99 msaitoh } /* ixgbe_sysctl_interrupt_rate_handler */
3395 1.45 msaitoh
3396 1.98 msaitoh const struct sysctlnode *
3397 1.333 msaitoh ixgbe_sysctl_instance(struct ixgbe_softc *sc)
3398 1.98 msaitoh {
3399 1.98 msaitoh const char *dvname;
3400 1.98 msaitoh struct sysctllog **log;
3401 1.98 msaitoh int rc;
3402 1.98 msaitoh const struct sysctlnode *rnode;
3403 1.1 dyoung
3404 1.333 msaitoh if (sc->sysctltop != NULL)
3405 1.333 msaitoh return sc->sysctltop;
3406 1.1 dyoung
3407 1.333 msaitoh log = &sc->sysctllog;
3408 1.333 msaitoh dvname = device_xname(sc->dev);
3409 1.1 dyoung
3410 1.98 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3411 1.98 msaitoh 0, CTLTYPE_NODE, dvname,
3412 1.98 msaitoh SYSCTL_DESCR("ixgbe information and settings"),
3413 1.98 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3414 1.98 msaitoh goto err;
3415 1.63 msaitoh
3416 1.98 msaitoh return rnode;
3417 1.98 msaitoh err:
3418 1.333 msaitoh device_printf(sc->dev,
3419 1.207 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3420 1.98 msaitoh return NULL;
3421 1.63 msaitoh }
3422 1.63 msaitoh
3423 1.99 msaitoh /************************************************************************
3424 1.99 msaitoh * ixgbe_add_device_sysctls
3425 1.99 msaitoh ************************************************************************/
3426 1.63 msaitoh static void
3427 1.333 msaitoh ixgbe_add_device_sysctls(struct ixgbe_softc *sc)
3428 1.1 dyoung {
3429 1.333 msaitoh device_t dev = sc->dev;
3430 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3431 1.98 msaitoh struct sysctllog **log;
3432 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
3433 1.1 dyoung
3434 1.333 msaitoh log = &sc->sysctllog;
3435 1.1 dyoung
3436 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
3437 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl root\n");
3438 1.98 msaitoh return;
3439 1.98 msaitoh }
3440 1.1 dyoung
3441 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3442 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3443 1.158 msaitoh "debug", SYSCTL_DESCR("Debug Info"),
3444 1.333 msaitoh ixgbe_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL)
3445 1.280 msaitoh != 0)
3446 1.158 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3447 1.158 msaitoh
3448 1.158 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3449 1.286 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3450 1.286 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
3451 1.286 msaitoh ixgbe_sysctl_rx_copy_len, 0,
3452 1.333 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
3453 1.286 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3454 1.286 msaitoh
3455 1.286 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3456 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3457 1.314 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
3458 1.333 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3459 1.314 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3460 1.314 msaitoh
3461 1.314 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3462 1.314 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3463 1.314 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
3464 1.333 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3465 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3466 1.1 dyoung
3467 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3468 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
3469 1.313 msaitoh SYSCTL_DESCR("max number of RX packets to process"),
3470 1.333 msaitoh ixgbe_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3471 1.313 msaitoh CTL_EOL) != 0)
3472 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3473 1.313 msaitoh
3474 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3475 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
3476 1.313 msaitoh SYSCTL_DESCR("max number of TX packets to process"),
3477 1.333 msaitoh ixgbe_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3478 1.313 msaitoh CTL_EOL) != 0)
3479 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3480 1.313 msaitoh
3481 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3482 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3483 1.98 msaitoh "num_queues", SYSCTL_DESCR("Number of queues"),
3484 1.333 msaitoh NULL, 0, &sc->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3485 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3486 1.43 msaitoh
3487 1.98 msaitoh /* Sysctls for all devices */
3488 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3489 1.99 msaitoh CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3490 1.333 msaitoh ixgbe_sysctl_flowcntl, 0, (void *)sc, 0, CTL_CREATE,
3491 1.99 msaitoh CTL_EOL) != 0)
3492 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3493 1.63 msaitoh
3494 1.333 msaitoh sc->enable_aim = ixgbe_enable_aim;
3495 1.343 msaitoh sc->max_interrupt_rate = ixgbe_max_interrupt_rate;
3496 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3497 1.99 msaitoh CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3498 1.333 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3499 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3500 1.1 dyoung
3501 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3502 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3503 1.98 msaitoh "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3504 1.333 msaitoh ixgbe_sysctl_advertise, 0, (void *)sc, 0, CTL_CREATE,
3505 1.99 msaitoh CTL_EOL) != 0)
3506 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3507 1.1 dyoung
3508 1.147 knakahar /*
3509 1.147 knakahar 	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3510 1.147 knakahar 	 * handler, it would cause flip-flopping between softint and
3511 1.147 knakahar 	 * workqueue mode within one deferred processing pass. Avoiding
3512 1.147 knakahar 	 * that would require preempt_disable()/preempt_enable() in
3513 1.147 knakahar 	 * ixgbe_sched_handle_que() to keep from hitting the KASSERT()
3514 1.147 knakahar 	 * in softint_schedule(). Changing "que->txrx_use_workqueue" in
3515 1.147 knakahar 	 * the interrupt handler instead is lighter than doing
3516 1.147 knakahar 	 * preempt_disable()/preempt_enable() in every ixgbe_sched_handle_que().
3517 1.147 knakahar */
3518 1.333 msaitoh sc->txrx_use_workqueue = ixgbe_txrx_workqueue;
3519 1.128 knakahar if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3520 1.280 msaitoh CTLTYPE_BOOL, "txrx_workqueue",
3521 1.280 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"),
3522 1.333 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE,
3523 1.280 msaitoh CTL_EOL) != 0)
3524 1.128 knakahar aprint_error_dev(dev, "could not create sysctl\n");
3525 1.128 knakahar
3526 1.98 msaitoh #ifdef IXGBE_DEBUG
3527 1.98 msaitoh /* testing sysctls (for all devices) */
3528 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3529 1.99 msaitoh CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3530 1.333 msaitoh ixgbe_sysctl_power_state, 0, (void *)sc, 0, CTL_CREATE,
3531 1.99 msaitoh CTL_EOL) != 0)
3532 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3533 1.45 msaitoh
3534 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3535 1.99 msaitoh CTLTYPE_STRING, "print_rss_config",
3536 1.99 msaitoh SYSCTL_DESCR("Prints RSS Configuration"),
3537 1.333 msaitoh ixgbe_sysctl_print_rss_config, 0, (void *)sc, 0, CTL_CREATE,
3538 1.99 msaitoh CTL_EOL) != 0)
3539 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3540 1.98 msaitoh #endif
3541 1.98 msaitoh /* for X550 series devices */
3542 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3543 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3544 1.99 msaitoh CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3545 1.333 msaitoh ixgbe_sysctl_dmac, 0, (void *)sc, 0, CTL_CREATE,
3546 1.99 msaitoh CTL_EOL) != 0)
3547 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3548 1.1 dyoung
3549 1.98 msaitoh /* for WoL-capable devices */
3550 1.333 msaitoh if (sc->wol_support) {
3551 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3552 1.99 msaitoh CTLTYPE_BOOL, "wol_enable",
3553 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3554 1.333 msaitoh ixgbe_sysctl_wol_enable, 0, (void *)sc, 0, CTL_CREATE,
3555 1.99 msaitoh CTL_EOL) != 0)
3556 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3557 1.1 dyoung
3558 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3559 1.99 msaitoh CTLTYPE_INT, "wufc",
3560 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3561 1.333 msaitoh ixgbe_sysctl_wufc, 0, (void *)sc, 0, CTL_CREATE,
3562 1.99 msaitoh CTL_EOL) != 0)
3563 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3564 1.98 msaitoh }
3565 1.1 dyoung
3566 1.98 msaitoh /* for X552/X557-AT devices */
3567 1.325 msaitoh if ((hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) ||
3568 1.325 msaitoh (hw->device_id == IXGBE_DEV_ID_X550EM_A_10G_T)) {
3569 1.98 msaitoh const struct sysctlnode *phy_node;
3570 1.1 dyoung
3571 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3572 1.98 msaitoh "phy", SYSCTL_DESCR("External PHY sysctls"),
3573 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3574 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3575 1.98 msaitoh return;
3576 1.98 msaitoh }
3577 1.1 dyoung
3578 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3579 1.99 msaitoh CTLTYPE_INT, "temp",
3580 1.99 msaitoh SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3581 1.333 msaitoh ixgbe_sysctl_phy_temp, 0, (void *)sc, 0, CTL_CREATE,
3582 1.99 msaitoh CTL_EOL) != 0)
3583 1.99 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3584 1.99 msaitoh
3585 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3586 1.99 msaitoh CTLTYPE_INT, "overtemp_occurred",
3587 1.280 msaitoh SYSCTL_DESCR(
3588 1.280 msaitoh "External PHY High Temperature Event Occurred"),
3589 1.333 msaitoh ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)sc, 0,
3590 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0)
3591 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3592 1.99 msaitoh }
3593 1.33 msaitoh
3594 1.163 msaitoh if ((hw->mac.type == ixgbe_mac_X550EM_a)
3595 1.163 msaitoh && (hw->phy.type == ixgbe_phy_fw))
3596 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3597 1.163 msaitoh CTLTYPE_BOOL, "force_10_100_autonego",
3598 1.163 msaitoh SYSCTL_DESCR("Force autonego on 10M and 100M"),
3599 1.163 msaitoh NULL, 0, &hw->phy.force_10_100_autonego, 0,
3600 1.163 msaitoh CTL_CREATE, CTL_EOL) != 0)
3601 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3602 1.163 msaitoh
3603 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) {
3604 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3605 1.99 msaitoh CTLTYPE_INT, "eee_state",
3606 1.99 msaitoh SYSCTL_DESCR("EEE Power Save State"),
3607 1.333 msaitoh ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE,
3608 1.99 msaitoh CTL_EOL) != 0)
3609 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3610 1.98 msaitoh }
3611 1.99 msaitoh } /* ixgbe_add_device_sysctls */
3612 1.1 dyoung
3613 1.99 msaitoh /************************************************************************
3614 1.99 msaitoh * ixgbe_allocate_pci_resources
3615 1.99 msaitoh ************************************************************************/
3616 1.98 msaitoh static int
3617 1.333 msaitoh ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
3618 1.98 msaitoh const struct pci_attach_args *pa)
3619 1.1 dyoung {
3620 1.346 msaitoh pcireg_t memtype, csr;
3621 1.333 msaitoh device_t dev = sc->dev;
3622 1.98 msaitoh bus_addr_t addr;
3623 1.98 msaitoh int flags;
3624 1.1 dyoung
3625 1.98 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3626 1.98 msaitoh switch (memtype) {
3627 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3628 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3629 1.333 msaitoh sc->osdep.mem_bus_space_tag = pa->pa_memt;
3630 1.98 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3631 1.333 msaitoh memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
3632 1.98 msaitoh goto map_err;
3633 1.98 msaitoh if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3634 1.98 msaitoh aprint_normal_dev(dev, "clearing prefetchable bit\n");
3635 1.98 msaitoh flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3636 1.98 msaitoh }
3637 1.333 msaitoh if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
3638 1.333 msaitoh sc->osdep.mem_size, flags,
3639 1.333 msaitoh &sc->osdep.mem_bus_space_handle) != 0) {
3640 1.98 msaitoh map_err:
3641 1.333 msaitoh sc->osdep.mem_size = 0;
3642 1.98 msaitoh aprint_error_dev(dev, "unable to map BAR0\n");
3643 1.98 msaitoh return ENXIO;
3644 1.98 msaitoh }
3645 1.171 msaitoh /*
3646 1.171 msaitoh 		 * Enable address decoding for the memory range in case the
3647 1.171 msaitoh 		 * BIOS or UEFI didn't set it.
3648 1.171 msaitoh */
3649 1.171 msaitoh csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3650 1.171 msaitoh PCI_COMMAND_STATUS_REG);
3651 1.171 msaitoh csr |= PCI_COMMAND_MEM_ENABLE;
3652 1.171 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3653 1.171 msaitoh csr);
3654 1.98 msaitoh break;
3655 1.98 msaitoh default:
3656 1.98 msaitoh aprint_error_dev(dev, "unexpected type on BAR0\n");
3657 1.98 msaitoh return ENXIO;
3658 1.98 msaitoh }
3659 1.1 dyoung
3660 1.98 msaitoh return (0);
3661 1.99 msaitoh } /* ixgbe_allocate_pci_resources */
3662 1.1 dyoung
3663 1.119 msaitoh static void
3664 1.333 msaitoh ixgbe_free_deferred_handlers(struct ixgbe_softc *sc)
3665 1.119 msaitoh {
3666 1.333 msaitoh struct ix_queue *que = sc->queues;
3667 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3668 1.119 msaitoh int i;
3669 1.119 msaitoh
3670 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) {
3671 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3672 1.119 msaitoh if (txr->txr_si != NULL)
3673 1.119 msaitoh softint_disestablish(txr->txr_si);
3674 1.119 msaitoh }
3675 1.119 msaitoh if (que->que_si != NULL)
3676 1.119 msaitoh softint_disestablish(que->que_si);
3677 1.119 msaitoh }
3678 1.333 msaitoh if (sc->txr_wq != NULL)
3679 1.333 msaitoh workqueue_destroy(sc->txr_wq);
3680 1.333 msaitoh if (sc->txr_wq_enqueued != NULL)
3681 1.333 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
3682 1.333 msaitoh if (sc->que_wq != NULL)
3683 1.333 msaitoh workqueue_destroy(sc->que_wq);
3684 1.333 msaitoh
3685 1.333 msaitoh if (sc->admin_wq != NULL) {
3686 1.333 msaitoh workqueue_destroy(sc->admin_wq);
3687 1.333 msaitoh sc->admin_wq = NULL;
3688 1.333 msaitoh }
3689 1.333 msaitoh if (sc->timer_wq != NULL) {
3690 1.333 msaitoh workqueue_destroy(sc->timer_wq);
3691 1.333 msaitoh sc->timer_wq = NULL;
3692 1.233 msaitoh }
3693 1.333 msaitoh if (sc->recovery_mode_timer_wq != NULL) {
3694 1.236 msaitoh /*
3695 1.236 msaitoh 		 * ixgbe_ifstop() doesn't call workqueue_wait() for the
3696 1.236 msaitoh 		 * recovery_mode_timer workqueue, so call it here.
3697 1.236 msaitoh */
3698 1.333 msaitoh workqueue_wait(sc->recovery_mode_timer_wq,
3699 1.333 msaitoh &sc->recovery_mode_timer_wc);
3700 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
3701 1.333 msaitoh workqueue_destroy(sc->recovery_mode_timer_wq);
3702 1.333 msaitoh sc->recovery_mode_timer_wq = NULL;
3703 1.119 msaitoh }
3704 1.257 msaitoh } /* ixgbe_free_deferred_handlers */
3705 1.119 msaitoh
3706 1.99 msaitoh /************************************************************************
3707 1.99 msaitoh * ixgbe_detach - Device removal routine
3708 1.1 dyoung *
3709 1.99 msaitoh * Called when the driver is being removed.
3710 1.99 msaitoh * Stops the adapter and deallocates all the resources
3711 1.99 msaitoh * that were allocated for driver operation.
3712 1.1 dyoung *
3713 1.99 msaitoh * return 0 on success, positive on failure
3714 1.99 msaitoh ************************************************************************/
3715 1.98 msaitoh static int
3716 1.98 msaitoh ixgbe_detach(device_t dev, int flags)
3717 1.1 dyoung {
3718 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3719 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
3720 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3721 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3722 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
3723 1.98 msaitoh u32 ctrl_ext;
3724 1.168 msaitoh int i;
3725 1.28 msaitoh
3726 1.98 msaitoh INIT_DEBUGOUT("ixgbe_detach: begin");
3727 1.333 msaitoh if (sc->osdep.attached == false)
3728 1.98 msaitoh return 0;
3729 1.26 msaitoh
3730 1.99 msaitoh if (ixgbe_pci_iov_detach(dev) != 0) {
3731 1.99 msaitoh device_printf(dev, "SR-IOV in use; detach first.\n");
3732 1.99 msaitoh return (EBUSY);
3733 1.99 msaitoh }
3734 1.99 msaitoh
3735 1.333 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) &&
3736 1.293 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
3737 1.99 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n");
3738 1.99 msaitoh return (EBUSY);
3739 1.26 msaitoh }
3740 1.293 yamaguch
3741 1.333 msaitoh ether_ifdetach(sc->ifp);
3742 1.24 msaitoh
3743 1.333 msaitoh sc->osdep.detaching = true;
3744 1.241 msaitoh /*
3745 1.252 msaitoh * Stop the interface. ixgbe_setup_low_power_mode() calls
3746 1.253 msaitoh * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3747 1.252 msaitoh * directly.
3748 1.241 msaitoh */
3749 1.333 msaitoh ixgbe_setup_low_power_mode(sc);
3750 1.241 msaitoh
3751 1.333 msaitoh callout_halt(&sc->timer, NULL);
3752 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3753 1.333 msaitoh callout_halt(&sc->recovery_mode_timer, NULL);
3754 1.333 msaitoh
3755 1.333 msaitoh workqueue_wait(sc->admin_wq, &sc->admin_wc);
3756 1.333 msaitoh atomic_store_relaxed(&sc->admin_pending, 0);
3757 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
3758 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
3759 1.241 msaitoh
3760 1.98 msaitoh pmf_device_deregister(dev);
3761 1.26 msaitoh
3762 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
3763 1.185 msaitoh
3764 1.98 msaitoh /* let hardware know driver is unloading */
3765 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3766 1.98 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3767 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3768 1.24 msaitoh
3769 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
3770 1.333 msaitoh netmap_detach(sc->ifp);
3771 1.99 msaitoh
3772 1.333 msaitoh ixgbe_free_pci_resources(sc);
3773 1.98 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */
3774 1.98 msaitoh bus_generic_detach(dev);
3775 1.98 msaitoh #endif
3776 1.333 msaitoh if_detach(sc->ifp);
3777 1.333 msaitoh ifmedia_fini(&sc->media);
3778 1.333 msaitoh if_percpuq_destroy(sc->ipq);
3779 1.333 msaitoh
3780 1.333 msaitoh sysctl_teardown(&sc->sysctllog);
3781 1.333 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup);
3782 1.333 msaitoh evcnt_detach(&sc->mbuf_defrag_failed);
3783 1.333 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup);
3784 1.333 msaitoh evcnt_detach(&sc->einval_tx_dma_setup);
3785 1.333 msaitoh evcnt_detach(&sc->other_tx_dma_setup);
3786 1.333 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup);
3787 1.333 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup);
3788 1.333 msaitoh evcnt_detach(&sc->watchdog_events);
3789 1.333 msaitoh evcnt_detach(&sc->tso_err);
3790 1.333 msaitoh evcnt_detach(&sc->admin_irqev);
3791 1.333 msaitoh evcnt_detach(&sc->link_workev);
3792 1.333 msaitoh evcnt_detach(&sc->mod_workev);
3793 1.333 msaitoh evcnt_detach(&sc->msf_workev);
3794 1.333 msaitoh evcnt_detach(&sc->phy_workev);
3795 1.1 dyoung
3796 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3797 1.98 msaitoh if (i < __arraycount(stats->mpc)) {
3798 1.98 msaitoh evcnt_detach(&stats->mpc[i]);
3799 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
3800 1.98 msaitoh evcnt_detach(&stats->rnbc[i]);
3801 1.98 msaitoh }
3802 1.98 msaitoh if (i < __arraycount(stats->pxontxc)) {
3803 1.98 msaitoh evcnt_detach(&stats->pxontxc[i]);
3804 1.98 msaitoh evcnt_detach(&stats->pxonrxc[i]);
3805 1.98 msaitoh evcnt_detach(&stats->pxofftxc[i]);
3806 1.98 msaitoh evcnt_detach(&stats->pxoffrxc[i]);
3807 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3808 1.151 msaitoh evcnt_detach(&stats->pxon2offc[i]);
3809 1.98 msaitoh }
3810 1.168 msaitoh }
3811 1.168 msaitoh
3812 1.333 msaitoh txr = sc->tx_rings;
3813 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
3814 1.333 msaitoh evcnt_detach(&sc->queues[i].irqs);
3815 1.333 msaitoh evcnt_detach(&sc->queues[i].handleq);
3816 1.333 msaitoh evcnt_detach(&sc->queues[i].req);
3817 1.168 msaitoh evcnt_detach(&txr->total_packets);
3818 1.168 msaitoh #ifndef IXGBE_LEGACY_TX
3819 1.168 msaitoh evcnt_detach(&txr->pcq_drops);
3820 1.168 msaitoh #endif
3821 1.327 msaitoh evcnt_detach(&txr->no_desc_avail);
3822 1.327 msaitoh evcnt_detach(&txr->tso_tx);
3823 1.168 msaitoh
3824 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
3825 1.98 msaitoh evcnt_detach(&stats->qprc[i]);
3826 1.98 msaitoh evcnt_detach(&stats->qptc[i]);
3827 1.98 msaitoh evcnt_detach(&stats->qbrc[i]);
3828 1.98 msaitoh evcnt_detach(&stats->qbtc[i]);
3829 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3830 1.151 msaitoh evcnt_detach(&stats->qprdc[i]);
3831 1.34 msaitoh }
3832 1.98 msaitoh
3833 1.98 msaitoh evcnt_detach(&rxr->rx_packets);
3834 1.98 msaitoh evcnt_detach(&rxr->rx_bytes);
3835 1.98 msaitoh evcnt_detach(&rxr->rx_copies);
3836 1.290 msaitoh evcnt_detach(&rxr->no_mbuf);
3837 1.98 msaitoh evcnt_detach(&rxr->rx_discarded);
3838 1.1 dyoung }
3839 1.98 msaitoh evcnt_detach(&stats->ipcs);
3840 1.98 msaitoh evcnt_detach(&stats->l4cs);
3841 1.98 msaitoh evcnt_detach(&stats->ipcs_bad);
3842 1.98 msaitoh evcnt_detach(&stats->l4cs_bad);
3843 1.98 msaitoh evcnt_detach(&stats->intzero);
3844 1.98 msaitoh evcnt_detach(&stats->legint);
3845 1.98 msaitoh evcnt_detach(&stats->crcerrs);
3846 1.98 msaitoh evcnt_detach(&stats->illerrc);
3847 1.98 msaitoh evcnt_detach(&stats->errbc);
3848 1.98 msaitoh evcnt_detach(&stats->mspdc);
3849 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3850 1.98 msaitoh evcnt_detach(&stats->mbsdc);
3851 1.98 msaitoh evcnt_detach(&stats->mpctotal);
3852 1.98 msaitoh evcnt_detach(&stats->mlfc);
3853 1.98 msaitoh evcnt_detach(&stats->mrfc);
3854 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
3855 1.326 msaitoh evcnt_detach(&stats->link_dn_cnt);
3856 1.98 msaitoh evcnt_detach(&stats->rlec);
3857 1.98 msaitoh evcnt_detach(&stats->lxontxc);
3858 1.98 msaitoh evcnt_detach(&stats->lxonrxc);
3859 1.98 msaitoh evcnt_detach(&stats->lxofftxc);
3860 1.98 msaitoh evcnt_detach(&stats->lxoffrxc);
3861 1.98 msaitoh
3862 1.98 msaitoh /* Packet Reception Stats */
3863 1.98 msaitoh evcnt_detach(&stats->tor);
3864 1.98 msaitoh evcnt_detach(&stats->gorc);
3865 1.98 msaitoh evcnt_detach(&stats->tpr);
3866 1.98 msaitoh evcnt_detach(&stats->gprc);
3867 1.98 msaitoh evcnt_detach(&stats->mprc);
3868 1.98 msaitoh evcnt_detach(&stats->bprc);
3869 1.98 msaitoh evcnt_detach(&stats->prc64);
3870 1.98 msaitoh evcnt_detach(&stats->prc127);
3871 1.98 msaitoh evcnt_detach(&stats->prc255);
3872 1.98 msaitoh evcnt_detach(&stats->prc511);
3873 1.98 msaitoh evcnt_detach(&stats->prc1023);
3874 1.98 msaitoh evcnt_detach(&stats->prc1522);
3875 1.98 msaitoh evcnt_detach(&stats->ruc);
3876 1.98 msaitoh evcnt_detach(&stats->rfc);
3877 1.98 msaitoh evcnt_detach(&stats->roc);
3878 1.98 msaitoh evcnt_detach(&stats->rjc);
3879 1.98 msaitoh evcnt_detach(&stats->mngprc);
3880 1.98 msaitoh evcnt_detach(&stats->mngpdc);
3881 1.98 msaitoh evcnt_detach(&stats->xec);
3882 1.1 dyoung
3883 1.98 msaitoh /* Packet Transmission Stats */
3884 1.98 msaitoh evcnt_detach(&stats->gotc);
3885 1.98 msaitoh evcnt_detach(&stats->tpt);
3886 1.98 msaitoh evcnt_detach(&stats->gptc);
3887 1.98 msaitoh evcnt_detach(&stats->bptc);
3888 1.98 msaitoh evcnt_detach(&stats->mptc);
3889 1.98 msaitoh evcnt_detach(&stats->mngptc);
3890 1.98 msaitoh evcnt_detach(&stats->ptc64);
3891 1.98 msaitoh evcnt_detach(&stats->ptc127);
3892 1.98 msaitoh evcnt_detach(&stats->ptc255);
3893 1.98 msaitoh evcnt_detach(&stats->ptc511);
3894 1.98 msaitoh evcnt_detach(&stats->ptc1023);
3895 1.98 msaitoh evcnt_detach(&stats->ptc1522);
3896 1.1 dyoung
3897 1.333 msaitoh ixgbe_free_queues(sc);
3898 1.333 msaitoh free(sc->mta, M_DEVBUF);
3899 1.1 dyoung
3900 1.333 msaitoh mutex_destroy(&sc->admin_mtx); /* XXX appropriate order? */
3901 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
3902 1.1 dyoung
3903 1.1 dyoung return (0);
3904 1.99 msaitoh } /* ixgbe_detach */
3905 1.1 dyoung
3906 1.99 msaitoh /************************************************************************
3907 1.99 msaitoh * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3908 1.99 msaitoh *
3909 1.99 msaitoh * Prepare the adapter/port for LPLU and/or WoL
3910 1.99 msaitoh ************************************************************************/
3911 1.1 dyoung static int
3912 1.333 msaitoh ixgbe_setup_low_power_mode(struct ixgbe_softc *sc)
3913 1.1 dyoung {
3914 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3915 1.333 msaitoh device_t dev = sc->dev;
3916 1.333 msaitoh struct ifnet *ifp = sc->ifp;
3917 1.186 msaitoh s32 error = 0;
3918 1.98 msaitoh
3919 1.98 msaitoh /* Limit power management flow to X550EM baseT */
3920 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3921 1.99 msaitoh hw->phy.ops.enter_lplu) {
3922 1.98 msaitoh /* X550EM baseT adapters need a special LPLU flow */
3923 1.98 msaitoh hw->phy.reset_disable = true;
3924 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3925 1.98 msaitoh error = hw->phy.ops.enter_lplu(hw);
3926 1.98 msaitoh if (error)
3927 1.98 msaitoh device_printf(dev,
3928 1.98 msaitoh "Error entering LPLU: %d\n", error);
3929 1.98 msaitoh hw->phy.reset_disable = false;
3930 1.98 msaitoh } else {
3931 1.98 msaitoh /* Just stop for other adapters */
3932 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3933 1.33 msaitoh }
3934 1.1 dyoung
3935 1.333 msaitoh IXGBE_CORE_LOCK(sc);
3936 1.253 msaitoh
3937 1.98 msaitoh if (!hw->wol_enabled) {
3938 1.98 msaitoh ixgbe_set_phy_power(hw, FALSE);
3939 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3940 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3941 1.98 msaitoh } else {
3942 1.98 msaitoh /* Turn off support for APM wakeup. (Using ACPI instead) */
3943 1.166 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3944 1.166 msaitoh IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3945 1.34 msaitoh
3946 1.35 msaitoh /*
3947 1.98 msaitoh * Clear Wake Up Status register to prevent any previous wakeup
3948 1.98 msaitoh * events from waking us up immediately after we suspend.
3949 1.33 msaitoh */
3950 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3951 1.98 msaitoh
3952 1.1 dyoung /*
3953 1.98 msaitoh * Program the Wakeup Filter Control register with user filter
3954 1.98 msaitoh * settings
3955 1.33 msaitoh */
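		/*
		 * sc->wufc is set through the "wufc" sysctl; for example,
		 * setting IXGBE_WUFC_MAG there would arm magic-packet wake
		 * (one of several IXGBE_WUFC_* filter bits).
		 */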
3956 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3957 1.98 msaitoh
3958 1.98 msaitoh /* Enable wakeups and power management in Wakeup Control */
3959 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC,
3960 1.98 msaitoh IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3961 1.1 dyoung }
3962 1.1 dyoung
3963 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
3964 1.253 msaitoh
3965 1.98 msaitoh return error;
3966 1.99 msaitoh } /* ixgbe_setup_low_power_mode */
3967 1.98 msaitoh
3968 1.99 msaitoh /************************************************************************
3969 1.99 msaitoh * ixgbe_shutdown - Shutdown entry point
3970 1.99 msaitoh ************************************************************************/
3971 1.98 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3972 1.98 msaitoh static int
3973 1.98 msaitoh ixgbe_shutdown(device_t dev)
3974 1.98 msaitoh {
3975 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3976 1.98 msaitoh int error = 0;
3977 1.34 msaitoh
3978 1.98 msaitoh INIT_DEBUGOUT("ixgbe_shutdown: begin");
3979 1.34 msaitoh
3980 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3981 1.1 dyoung
3982 1.98 msaitoh return (error);
3983 1.99 msaitoh } /* ixgbe_shutdown */
3984 1.98 msaitoh #endif
3985 1.1 dyoung
3986 1.99 msaitoh /************************************************************************
3987 1.99 msaitoh * ixgbe_suspend
3988 1.99 msaitoh *
3989 1.99 msaitoh * From D0 to D3
3990 1.99 msaitoh ************************************************************************/
3991 1.98 msaitoh static bool
3992 1.98 msaitoh ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3993 1.1 dyoung {
3994 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3995 1.186 msaitoh int error = 0;
3996 1.98 msaitoh
3997 1.98 msaitoh INIT_DEBUGOUT("ixgbe_suspend: begin");
3998 1.98 msaitoh
3999 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
4000 1.1 dyoung
4001 1.98 msaitoh return (error);
4002 1.99 msaitoh } /* ixgbe_suspend */
4003 1.1 dyoung
4004 1.99 msaitoh /************************************************************************
4005 1.99 msaitoh * ixgbe_resume
4006 1.99 msaitoh *
4007 1.99 msaitoh * From D3 to D0
4008 1.99 msaitoh ************************************************************************/
4009 1.98 msaitoh static bool
4010 1.98 msaitoh ixgbe_resume(device_t dev, const pmf_qual_t *qual)
4011 1.98 msaitoh {
4012 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
4013 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4014 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4015 1.186 msaitoh u32 wus;
4016 1.1 dyoung
4017 1.98 msaitoh INIT_DEBUGOUT("ixgbe_resume: begin");
4018 1.33 msaitoh
4019 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4020 1.43 msaitoh
4021 1.98 msaitoh /* Read & clear WUS register */
4022 1.98 msaitoh wus = IXGBE_READ_REG(hw, IXGBE_WUS);
4023 1.98 msaitoh if (wus)
4024 1.98 msaitoh device_printf(dev, "Woken up by (WUS): %#010x\n",
4025 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_WUS));
4026 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4027 1.98 msaitoh /* And clear WUFC until next low-power transition */
4028 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4029 1.1 dyoung
4030 1.1 dyoung /*
4031 1.98 msaitoh * Required after D3->D0 transition;
4032 1.98 msaitoh * will re-advertise all previous advertised speeds
4033 1.98 msaitoh */
4034 1.98 msaitoh if (ifp->if_flags & IFF_UP)
4035 1.333 msaitoh ixgbe_init_locked(sc);
4036 1.34 msaitoh
4037 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4038 1.1 dyoung
4039 1.98 msaitoh return true;
4040 1.99 msaitoh } /* ixgbe_resume */
4041 1.1 dyoung
4042 1.98 msaitoh /*
4043 1.98 msaitoh * Set the various hardware offload abilities.
4044 1.98 msaitoh *
4045 1.98 msaitoh * This takes the ifnet's if_capenable flags (e.g. set by the user using
4046 1.98 msaitoh * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
4047 1.98 msaitoh * mbuf offload flags the driver will understand.
4048 1.98 msaitoh */
4049 1.1 dyoung static void
4050 1.333 msaitoh ixgbe_set_if_hwassist(struct ixgbe_softc *sc)
4051 1.1 dyoung {
4052 1.98 msaitoh /* XXX */
4053 1.1 dyoung }
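/*
 * A hypothetical sketch of the mapping this stub would perform on NetBSD,
 * building an if_hwassist-style mask of mbuf checksum/TSO flags from the
 * interface capabilities (illustrative only; the NetBSD port configures
 * offloads elsewhere):
 *
 *	uint64_t hwassist = 0;
 *
 *	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx)
 *		hwassist |= M_CSUM_IPv4;
 *	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx)
 *		hwassist |= M_CSUM_TCPv4;
 *	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx)
 *		hwassist |= M_CSUM_UDPv4;
 *	if (ifp->if_capenable & IFCAP_TSOv4)
 *		hwassist |= M_CSUM_TSOv4;
 */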
4054 1.1 dyoung
4055 1.99 msaitoh /************************************************************************
4056 1.99 msaitoh * ixgbe_init_locked - Init entry point
4057 1.99 msaitoh *
4058 1.99 msaitoh * Used in two ways: It is used by the stack as an init
4059 1.99 msaitoh * entry point in network interface structure. It is also
4060 1.99 msaitoh * used by the driver as a hw/sw initialization routine to
4061 1.99 msaitoh * get to a consistent state.
4062 1.1 dyoung *
4063 1.99 msaitoh  *   ixgbe_init_locked() returns nothing; the ixgbe_init() wrapper returns 0.
4064 1.99 msaitoh ************************************************************************/
4065 1.98 msaitoh static void
4066 1.333 msaitoh ixgbe_init_locked(struct ixgbe_softc *sc)
4067 1.1 dyoung {
4068 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4069 1.333 msaitoh device_t dev = sc->dev;
4070 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4071 1.157 msaitoh struct ix_queue *que;
4072 1.186 msaitoh struct tx_ring *txr;
4073 1.186 msaitoh struct rx_ring *rxr;
4074 1.98 msaitoh u32 txdctl, mhadd;
4075 1.98 msaitoh u32 rxdctl, rxctrl;
4076 1.186 msaitoh u32 ctrl_ext;
4077 1.219 msaitoh bool unsupported_sfp = false;
4078 1.283 msaitoh int i, j, error;
4079 1.1 dyoung
4080 1.98 msaitoh /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
4081 1.1 dyoung
4082 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4083 1.98 msaitoh INIT_DEBUGOUT("ixgbe_init_locked: begin");
4084 1.1 dyoung
4085 1.219 msaitoh hw->need_unsupported_sfp_recovery = false;
4086 1.98 msaitoh hw->adapter_stopped = FALSE;
4087 1.98 msaitoh ixgbe_stop_adapter(hw);
4088 1.333 msaitoh callout_stop(&sc->timer);
4089 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4090 1.333 msaitoh callout_stop(&sc->recovery_mode_timer);
4091 1.333 msaitoh for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
4092 1.157 msaitoh que->disabled_count = 0;
4093 1.1 dyoung
4094 1.98 msaitoh /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
4095 1.333 msaitoh sc->max_frame_size =
4096 1.98 msaitoh ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4097 1.1 dyoung
4098 1.98 msaitoh /* Queue indices may change with IOV mode */
4099 1.333 msaitoh ixgbe_align_all_queue_indices(sc);
4100 1.99 msaitoh
4101 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
4102 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
4103 1.1 dyoung
4104 1.98 msaitoh /* Get the latest mac address, User can use a LAA */
4105 1.98 msaitoh memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4106 1.98 msaitoh IXGBE_ETH_LENGTH_OF_ADDRESS);
4107 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
4108 1.98 msaitoh hw->addr_ctrl.rar_used_count = 1;
4109 1.1 dyoung
4110 1.98 msaitoh /* Set hardware offload abilities from ifnet flags */
4111 1.333 msaitoh ixgbe_set_if_hwassist(sc);
4112 1.48 msaitoh
4113 1.98 msaitoh /* Prepare transmit descriptors and buffers */
4114 1.333 msaitoh if (ixgbe_setup_transmit_structures(sc)) {
4115 1.98 msaitoh device_printf(dev, "Could not setup transmit structures\n");
4116 1.333 msaitoh ixgbe_stop_locked(sc);
4117 1.98 msaitoh return;
4118 1.98 msaitoh }
4119 1.1 dyoung
4120 1.98 msaitoh ixgbe_init_hw(hw);
4121 1.144 msaitoh
4122 1.333 msaitoh ixgbe_initialize_iov(sc);
4123 1.144 msaitoh
4124 1.333 msaitoh ixgbe_initialize_transmit_units(sc);
4125 1.1 dyoung
4126 1.98 msaitoh /* Setup Multicast table */
4127 1.333 msaitoh ixgbe_set_rxfilter(sc);
4128 1.43 msaitoh
4129 1.289 msaitoh /* Use fixed buffer size, even for jumbo frames */
4130 1.333 msaitoh sc->rx_mbuf_sz = MCLBYTES;
4131 1.43 msaitoh
4132 1.98 msaitoh /* Prepare receive descriptors and buffers */
4133 1.333 msaitoh error = ixgbe_setup_receive_structures(sc);
4134 1.283 msaitoh if (error) {
4135 1.283 msaitoh device_printf(dev,
4136 1.283 msaitoh "Could not setup receive structures (err = %d)\n", error);
4137 1.333 msaitoh ixgbe_stop_locked(sc);
4138 1.98 msaitoh return;
4139 1.98 msaitoh }
4140 1.43 msaitoh
4141 1.98 msaitoh /* Configure RX settings */
4142 1.333 msaitoh ixgbe_initialize_receive_units(sc);
4143 1.43 msaitoh
4144 1.233 msaitoh 	/* Initialize the variable holding task enqueue requests from interrupts */
4145 1.333 msaitoh sc->task_requests = 0;
4146 1.233 msaitoh
4147 1.99 msaitoh /* Enable SDP & MSI-X interrupts based on adapter */
4148 1.333 msaitoh ixgbe_config_gpie(sc);
4149 1.43 msaitoh
4150 1.98 msaitoh /* Set MTU size */
4151 1.98 msaitoh if (ifp->if_mtu > ETHERMTU) {
4152 1.98 msaitoh /* aka IXGBE_MAXFRS on 82599 and newer */
4153 1.98 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4154 1.98 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
4155 1.333 msaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4156 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4157 1.55 msaitoh }
4158 1.55 msaitoh
4159 1.98 msaitoh /* Now enable all the queues */
4160 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4161 1.333 msaitoh txr = &sc->tx_rings[i];
4162 1.98 msaitoh txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4163 1.98 msaitoh txdctl |= IXGBE_TXDCTL_ENABLE;
4164 1.98 msaitoh /* Set WTHRESH to 8, burst writeback */
4165 1.348 msaitoh txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK;
4166 1.292 msaitoh txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
4167 1.98 msaitoh /*
4168 1.98 msaitoh * When the internal queue falls below PTHRESH (32),
4169 1.98 msaitoh * start prefetching as long as there are at least
4170 1.98 msaitoh * HTHRESH (1) buffers ready. The values are taken
4171 1.98 msaitoh * from the Intel linux driver 3.8.21.
4172 1.98 msaitoh * Prefetching enables tx line rate even with 1 queue.
4173 1.98 msaitoh */
4174 1.98 msaitoh txdctl |= (32 << 0) | (1 << 8);
4175 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4176 1.55 msaitoh }
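	/*
	 * For reference (assuming the usual TXDCTL layout: PTHRESH in
	 * bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16), the
	 * "(32 << 0) | (1 << 8)" above encodes PTHRESH = 32 and
	 * HTHRESH = 1; WTHRESH was already set separately above.
	 */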
4177 1.43 msaitoh
4178 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4179 1.333 msaitoh rxr = &sc->rx_rings[i];
4180 1.98 msaitoh rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4181 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4182 1.98 msaitoh /*
4183 1.99 msaitoh * PTHRESH = 21
4184 1.99 msaitoh * HTHRESH = 4
4185 1.99 msaitoh * WTHRESH = 8
4186 1.99 msaitoh */
4187 1.98 msaitoh rxdctl &= ~0x3FFFFF;
4188 1.98 msaitoh rxdctl |= 0x080420;
4189 1.98 msaitoh }
4190 1.98 msaitoh rxdctl |= IXGBE_RXDCTL_ENABLE;
4191 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4192 1.144 msaitoh for (j = 0; j < 10; j++) {
4193 1.98 msaitoh if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4194 1.98 msaitoh IXGBE_RXDCTL_ENABLE)
4195 1.98 msaitoh break;
4196 1.98 msaitoh else
4197 1.98 msaitoh msec_delay(1);
4198 1.55 msaitoh }
4199 1.217 msaitoh IXGBE_WRITE_BARRIER(hw);
4200 1.99 msaitoh
4201 1.98 msaitoh /*
4202 1.98 msaitoh * In netmap mode, we must preserve the buffers made
4203 1.98 msaitoh * available to userspace before the if_init()
4204 1.98 msaitoh * (this is true by default on the TX side, because
4205 1.98 msaitoh * init makes all buffers available to userspace).
4206 1.98 msaitoh *
4207 1.98 msaitoh * netmap_reset() and the device specific routines
4208 1.98 msaitoh * (e.g. ixgbe_setup_receive_rings()) map these
4209 1.98 msaitoh * buffers at the end of the NIC ring, so here we
4210 1.98 msaitoh * must set the RDT (tail) register to make sure
4211 1.98 msaitoh * they are not overwritten.
4212 1.98 msaitoh *
4213 1.98 msaitoh * In this driver the NIC ring starts at RDH = 0,
4214 1.98 msaitoh * RDT points to the last slot available for reception (?),
4215 1.98 msaitoh * so RDT = num_rx_desc - 1 means the whole ring is available.
4216 1.98 msaitoh */
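		/*
		 * Worked example: with num_rx_desc = 1024 and an empty
		 * netmap reservation (nm_kr_rxspace() == 0), both branches
		 * below write RDT = 1023, i.e. the whole ring is available.
		 */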
4217 1.99 msaitoh #ifdef DEV_NETMAP
4218 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
4219 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP)) {
4220 1.333 msaitoh struct netmap_adapter *na = NA(sc->ifp);
4221 1.189 msaitoh struct netmap_kring *kring = na->rx_rings[i];
4222 1.98 msaitoh int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4223 1.98 msaitoh
4224 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4225 1.98 msaitoh } else
4226 1.98 msaitoh #endif /* DEV_NETMAP */
4227 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4228 1.333 msaitoh sc->num_rx_desc - 1);
4229 1.48 msaitoh }
4230 1.98 msaitoh
4231 1.98 msaitoh /* Enable Receive engine */
4232 1.98 msaitoh rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4233 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4234 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_DMBYPS;
4235 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_RXEN;
4236 1.98 msaitoh ixgbe_enable_rx_dma(hw, rxctrl);
4237 1.98 msaitoh
4238 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4239 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4240 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4241 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4242 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4243 1.98 msaitoh
4244 1.144 msaitoh /* Set up MSI/MSI-X routing */
4245 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4246 1.333 msaitoh ixgbe_configure_ivars(sc);
4247 1.98 msaitoh /* Set up auto-mask */
4248 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4249 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4250 1.98 msaitoh else {
4251 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4252 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4253 1.55 msaitoh }
4254 1.98 msaitoh } else { /* Simple settings for Legacy/MSI */
4255 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 0);
4256 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 1);
4257 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4258 1.55 msaitoh }
4259 1.43 msaitoh
4260 1.333 msaitoh ixgbe_init_fdir(sc);
4261 1.98 msaitoh
4262 1.98 msaitoh /*
4263 1.98 msaitoh * Check on any SFP devices that
4264 1.98 msaitoh * need to be kick-started
4265 1.98 msaitoh */
4266 1.98 msaitoh if (hw->phy.type == ixgbe_phy_none) {
4267 1.283 msaitoh error = hw->phy.ops.identify(hw);
4268 1.283 msaitoh if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4269 1.219 msaitoh unsupported_sfp = true;
4270 1.219 msaitoh } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4271 1.219 msaitoh unsupported_sfp = true;
4272 1.219 msaitoh
4273 1.219 msaitoh if (unsupported_sfp)
4274 1.219 msaitoh device_printf(dev,
4275 1.219 msaitoh "Unsupported SFP+ module type was detected.\n");
4276 1.98 msaitoh
4277 1.98 msaitoh /* Set moderation on the Link interrupt */
4278 1.333 msaitoh ixgbe_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);
4279 1.98 msaitoh
4280 1.173 msaitoh /* Enable EEE power saving */
4281 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
4282 1.173 msaitoh hw->mac.ops.setup_eee(hw,
4283 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
4284 1.173 msaitoh
4285 1.144 msaitoh /* Enable power to the phy. */
4286 1.219 msaitoh if (!unsupported_sfp) {
4287 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
4288 1.144 msaitoh
4289 1.219 msaitoh /* Config/Enable Link */
4290 1.333 msaitoh ixgbe_config_link(sc);
4291 1.219 msaitoh }
4292 1.55 msaitoh
4293 1.98 msaitoh /* Hardware Packet Buffer & Flow Control setup */
4294 1.333 msaitoh ixgbe_config_delay_values(sc);
4295 1.1 dyoung
4296 1.98 msaitoh /* Initialize the FC settings */
4297 1.98 msaitoh ixgbe_start_hw(hw);
4298 1.1 dyoung
4299 1.98 msaitoh /* Set up VLAN support and filter */
4300 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
4301 1.1 dyoung
4302 1.98 msaitoh /* Setup DMA Coalescing */
4303 1.333 msaitoh ixgbe_config_dmac(sc);
4304 1.98 msaitoh
4305 1.230 msaitoh /* OK to schedule workqueues. */
4306 1.333 msaitoh sc->schedule_wqs_ok = true;
4307 1.230 msaitoh
4308 1.98 msaitoh /* Enable the use of the MBX by the VF's */
4309 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
4310 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4311 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4312 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4313 1.1 dyoung }
4314 1.98 msaitoh
4315 1.123 msaitoh /* Update saved flags. See ixgbe_ifflags_cb() */
4316 1.333 msaitoh sc->if_flags = ifp->if_flags;
4317 1.333 msaitoh sc->ec_capenable = sc->osdep.ec.ec_capenable;
4318 1.123 msaitoh
4319 1.337 msaitoh /* Inform the stack we're ready */
4320 1.98 msaitoh ifp->if_flags |= IFF_RUNNING;
4321 1.98 msaitoh
4322 1.337 msaitoh /* And now turn on interrupts */
4323 1.337 msaitoh ixgbe_enable_intr(sc);
4324 1.337 msaitoh
4325 1.1 dyoung return;
4326 1.99 msaitoh } /* ixgbe_init_locked */
4327 1.1 dyoung
4328 1.99 msaitoh /************************************************************************
4329 1.99 msaitoh * ixgbe_init
4330 1.99 msaitoh ************************************************************************/
4331 1.98 msaitoh static int
4332 1.98 msaitoh ixgbe_init(struct ifnet *ifp)
4333 1.98 msaitoh {
4334 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
4335 1.98 msaitoh
4336 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4337 1.333 msaitoh ixgbe_init_locked(sc);
4338 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4339 1.98 msaitoh
4340 1.98 msaitoh return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4341 1.99 msaitoh } /* ixgbe_init */
4342 1.43 msaitoh
4343 1.99 msaitoh /************************************************************************
4344 1.99 msaitoh * ixgbe_set_ivar
4345 1.99 msaitoh *
4346 1.99 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt
4347 1.99 msaitoh * (yes this is all very magic and confusing :)
4348 1.99 msaitoh * - entry is the register array entry
4349 1.99 msaitoh * - vector is the MSI-X vector for this queue
4350 1.99 msaitoh * - type is RX/TX/MISC
4351 1.99 msaitoh ************************************************************************/
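/*
 * Worked example, following the 82599/X540/X550 branch below: programming
 * RX queue entry 5 (type 0) to MSI-X vector 3 gives
 * index = (16 * (5 & 1)) + (8 * 0) = 16, so (3 | IXGBE_IVAR_ALLOC_VAL)
 * is written into bits 23:16 of IVAR(5 >> 1) = IVAR(2).
 */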
4352 1.42 msaitoh static void
4353 1.333 msaitoh ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
4354 1.1 dyoung {
4355 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4356 1.98 msaitoh u32 ivar, index;
4357 1.98 msaitoh
4358 1.98 msaitoh vector |= IXGBE_IVAR_ALLOC_VAL;
4359 1.98 msaitoh
4360 1.98 msaitoh switch (hw->mac.type) {
4361 1.98 msaitoh case ixgbe_mac_82598EB:
4362 1.98 msaitoh if (type == -1)
4363 1.98 msaitoh entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4364 1.98 msaitoh else
4365 1.98 msaitoh entry += (type * 64);
4366 1.98 msaitoh index = (entry >> 2) & 0x1F;
4367 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4368 1.198 msaitoh ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4369 1.198 msaitoh ivar |= ((u32)vector << (8 * (entry & 0x3)));
4370 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
4371 1.98 msaitoh break;
4372 1.98 msaitoh case ixgbe_mac_82599EB:
4373 1.98 msaitoh case ixgbe_mac_X540:
4374 1.98 msaitoh case ixgbe_mac_X550:
4375 1.98 msaitoh case ixgbe_mac_X550EM_x:
4376 1.99 msaitoh case ixgbe_mac_X550EM_a:
4377 1.98 msaitoh if (type == -1) { /* MISC IVAR */
4378 1.98 msaitoh index = (entry & 1) * 8;
4379 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4380 1.194 msaitoh ivar &= ~(0xffUL << index);
4381 1.194 msaitoh ivar |= ((u32)vector << index);
4382 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4383 1.98 msaitoh } else { /* RX/TX IVARS */
4384 1.98 msaitoh index = (16 * (entry & 1)) + (8 * type);
4385 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4386 1.194 msaitoh ivar &= ~(0xffUL << index);
4387 1.194 msaitoh ivar |= ((u32)vector << index);
4388 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4389 1.98 msaitoh }
4390 1.135 msaitoh break;
4391 1.98 msaitoh default:
4392 1.98 msaitoh break;
4393 1.98 msaitoh }
4394 1.99 msaitoh } /* ixgbe_set_ivar */
4395 1.1 dyoung
4396 1.99 msaitoh /************************************************************************
4397 1.99 msaitoh * ixgbe_configure_ivars
4398 1.99 msaitoh ************************************************************************/
4399 1.98 msaitoh static void
4400 1.333 msaitoh ixgbe_configure_ivars(struct ixgbe_softc *sc)
4401 1.98 msaitoh {
4402 1.333 msaitoh struct ix_queue *que = sc->queues;
4403 1.186 msaitoh u32 newitr;
4404 1.1 dyoung
4405 1.343 msaitoh if (sc->max_interrupt_rate > 0)
4406 1.343 msaitoh newitr = (4000000 / sc->max_interrupt_rate) & 0x0FF8;
4407 1.98 msaitoh else {
4408 1.48 msaitoh /*
4409 1.99 msaitoh * Disable DMA coalescing if interrupt moderation is
4410 1.99 msaitoh * disabled.
4411 1.99 msaitoh */
4412 1.333 msaitoh sc->dmac = 0;
4413 1.98 msaitoh newitr = 0;
4414 1.98 msaitoh }
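	/*
	 * Worked example (assuming the EITR interval field sits in bits
	 * 11:3 with 2 usec granularity, as on 82599 and later): with
	 * max_interrupt_rate = 8000, 4000000 / 8000 = 500 and
	 * 500 & 0x0FF8 = 0x1F0, i.e. 62 * 2 usec = 124 usec between
	 * interrupts, roughly 8000 interrupts/s per queue.
	 */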
4415 1.98 msaitoh
4416 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
4417 1.333 msaitoh struct rx_ring *rxr = &sc->rx_rings[i];
4418 1.333 msaitoh struct tx_ring *txr = &sc->tx_rings[i];
4419 1.98 msaitoh /* First the RX queue entry */
4420 1.333 msaitoh ixgbe_set_ivar(sc, rxr->me, que->msix, 0);
4421 1.98 msaitoh /* ... and the TX */
4422 1.333 msaitoh ixgbe_set_ivar(sc, txr->me, que->msix, 1);
4423 1.98 msaitoh /* Set an Initial EITR value */
4424 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, newitr);
4425 1.138 knakahar /*
4426 1.138 knakahar 		 * To eliminate the influence of the previous state.
4427 1.138 knakahar 		 * At this point, the Tx/Rx interrupt handler
4428 1.138 knakahar 		 * (ixgbe_msix_que()) cannot be called, so neither
4429 1.138 knakahar 		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4430 1.138 knakahar */
4431 1.138 knakahar que->eitr_setting = 0;
4432 1.98 msaitoh }
4433 1.98 msaitoh
4434 1.98 msaitoh /* For the Link interrupt */
4435 1.333 msaitoh ixgbe_set_ivar(sc, 1, sc->vector, -1);
4436 1.99 msaitoh } /* ixgbe_configure_ivars */
4437 1.98 msaitoh
4438 1.99 msaitoh /************************************************************************
4439 1.99 msaitoh * ixgbe_config_gpie
4440 1.99 msaitoh ************************************************************************/
4441 1.98 msaitoh static void
4442 1.333 msaitoh ixgbe_config_gpie(struct ixgbe_softc *sc)
4443 1.98 msaitoh {
4444 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4445 1.186 msaitoh u32 gpie;
4446 1.98 msaitoh
4447 1.98 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4448 1.98 msaitoh
4449 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4450 1.99 msaitoh /* Enable Enhanced MSI-X mode */
4451 1.99 msaitoh gpie |= IXGBE_GPIE_MSIX_MODE
4452 1.186 msaitoh | IXGBE_GPIE_EIAME
4453 1.186 msaitoh | IXGBE_GPIE_PBA_SUPPORT
4454 1.186 msaitoh | IXGBE_GPIE_OCD;
4455 1.99 msaitoh }
4456 1.99 msaitoh
4457 1.98 msaitoh /* Fan Failure Interrupt */
4458 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4459 1.98 msaitoh gpie |= IXGBE_SDP1_GPIEN;
4460 1.1 dyoung
4461 1.99 msaitoh /* Thermal Sensor Interrupt */
4462 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4463 1.99 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4464 1.1 dyoung
4465 1.99 msaitoh /* Link detection */
4466 1.99 msaitoh switch (hw->mac.type) {
4467 1.99 msaitoh case ixgbe_mac_82599EB:
4468 1.99 msaitoh gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4469 1.99 msaitoh break;
4470 1.99 msaitoh case ixgbe_mac_X550EM_x:
4471 1.99 msaitoh case ixgbe_mac_X550EM_a:
4472 1.98 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4473 1.99 msaitoh break;
4474 1.99 msaitoh default:
4475 1.99 msaitoh break;
4476 1.1 dyoung }
4477 1.1 dyoung
4478 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4479 1.98 msaitoh
4480 1.99 msaitoh } /* ixgbe_config_gpie */
4481 1.1 dyoung
4482 1.99 msaitoh /************************************************************************
4483 1.99 msaitoh * ixgbe_config_delay_values
4484 1.99 msaitoh *
4485 1.333 msaitoh * Requires sc->max_frame_size to be set.
4486 1.99 msaitoh ************************************************************************/
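/*
 * In outline: tmp = IXGBE_DV(frame, frame) is the required delay value in
 * bit times, IXGBE_BT2KB() converts it to KB, rxpb is the TC0 packet-buffer
 * size in KB (RXPBSIZE(0) >> 10), and the watermarks become
 * high_water[0] = rxpb - size and low_water[0] = IXGBE_BT2KB(LOW_DV(frame)).
 */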
4487 1.33 msaitoh static void
4488 1.333 msaitoh ixgbe_config_delay_values(struct ixgbe_softc *sc)
4489 1.33 msaitoh {
4490 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4491 1.186 msaitoh u32 rxpb, frame, size, tmp;
4492 1.33 msaitoh
4493 1.333 msaitoh frame = sc->max_frame_size;
4494 1.33 msaitoh
4495 1.98 msaitoh /* Calculate High Water */
4496 1.98 msaitoh switch (hw->mac.type) {
4497 1.98 msaitoh case ixgbe_mac_X540:
4498 1.44 msaitoh case ixgbe_mac_X550:
4499 1.44 msaitoh case ixgbe_mac_X550EM_x:
4500 1.99 msaitoh case ixgbe_mac_X550EM_a:
4501 1.98 msaitoh tmp = IXGBE_DV_X540(frame, frame);
4502 1.44 msaitoh break;
4503 1.44 msaitoh default:
4504 1.98 msaitoh tmp = IXGBE_DV(frame, frame);
4505 1.44 msaitoh break;
4506 1.44 msaitoh }
4507 1.98 msaitoh size = IXGBE_BT2KB(tmp);
4508 1.98 msaitoh rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4509 1.98 msaitoh hw->fc.high_water[0] = rxpb - size;
4510 1.44 msaitoh
4511 1.98 msaitoh /* Now calculate Low Water */
4512 1.98 msaitoh switch (hw->mac.type) {
4513 1.98 msaitoh case ixgbe_mac_X540:
4514 1.98 msaitoh case ixgbe_mac_X550:
4515 1.98 msaitoh case ixgbe_mac_X550EM_x:
4516 1.99 msaitoh case ixgbe_mac_X550EM_a:
4517 1.98 msaitoh tmp = IXGBE_LOW_DV_X540(frame);
4518 1.98 msaitoh break;
4519 1.98 msaitoh default:
4520 1.98 msaitoh tmp = IXGBE_LOW_DV(frame);
4521 1.98 msaitoh break;
4522 1.33 msaitoh }
4523 1.98 msaitoh hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4524 1.33 msaitoh
4525 1.98 msaitoh hw->fc.pause_time = IXGBE_FC_PAUSE;
4526 1.98 msaitoh hw->fc.send_xon = TRUE;
4527 1.99 msaitoh } /* ixgbe_config_delay_values */
4528 1.33 msaitoh
4529 1.99 msaitoh /************************************************************************
4530 1.213 msaitoh * ixgbe_set_rxfilter - Multicast Update
4531 1.1 dyoung *
4532 1.99 msaitoh  * Called whenever the multicast address list is updated.
4533 1.99 msaitoh ************************************************************************/
4534 1.1 dyoung static void
4535 1.333 msaitoh ixgbe_set_rxfilter(struct ixgbe_softc *sc)
4536 1.1 dyoung {
4537 1.99 msaitoh struct ixgbe_mc_addr *mta;
4538 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4539 1.98 msaitoh u8 *update_ptr;
4540 1.98 msaitoh int mcnt = 0;
4541 1.99 msaitoh u32 fctrl;
4542 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
4543 1.98 msaitoh struct ether_multi *enm;
4544 1.98 msaitoh struct ether_multistep step;
4545 1.98 msaitoh
4546 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4547 1.213 msaitoh IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4548 1.98 msaitoh
4549 1.333 msaitoh mta = sc->mta;
4550 1.98 msaitoh bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4551 1.1 dyoung
4552 1.105 msaitoh ETHER_LOCK(ec);
4553 1.183 ozaki ec->ec_flags &= ~ETHER_F_ALLMULTI;
4554 1.98 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
4555 1.98 msaitoh while (enm != NULL) {
4556 1.98 msaitoh if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4557 1.98 msaitoh (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4558 1.98 msaitoh ETHER_ADDR_LEN) != 0)) {
4559 1.183 ozaki ec->ec_flags |= ETHER_F_ALLMULTI;
4560 1.98 msaitoh break;
4561 1.98 msaitoh }
4562 1.98 msaitoh bcopy(enm->enm_addrlo,
4563 1.98 msaitoh mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4564 1.333 msaitoh mta[mcnt].vmdq = sc->pool;
4565 1.98 msaitoh mcnt++;
4566 1.98 msaitoh ETHER_NEXT_MULTI(step, enm);
4567 1.98 msaitoh }
4568 1.1 dyoung
4569 1.333 msaitoh fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4570 1.98 msaitoh if (ifp->if_flags & IFF_PROMISC)
4571 1.98 msaitoh fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4572 1.183 ozaki else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4573 1.98 msaitoh fctrl |= IXGBE_FCTRL_MPE;
4574 1.212 msaitoh fctrl &= ~IXGBE_FCTRL_UPE;
4575 1.212 msaitoh } else
4576 1.212 msaitoh fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4577 1.1 dyoung
4578 1.211 msaitoh /* Update multicast filter entries only when it's not ALLMULTI */
4579 1.211 msaitoh if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4580 1.211 msaitoh ETHER_UNLOCK(ec);
4581 1.98 msaitoh update_ptr = (u8 *)mta;
4582 1.333 msaitoh ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4583 1.99 msaitoh ixgbe_mc_array_itr, TRUE);
4584 1.211 msaitoh } else
4585 1.211 msaitoh ETHER_UNLOCK(ec);
4586 1.332 msaitoh
4587 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4588 1.213 msaitoh } /* ixgbe_set_rxfilter */
4589 1.1 dyoung
4590 1.99 msaitoh /************************************************************************
4591 1.99 msaitoh * ixgbe_mc_array_itr
4592 1.99 msaitoh *
4593 1.99 msaitoh * An iterator function needed by the multicast shared code.
4594 1.99 msaitoh * It feeds the shared code routine the addresses in the
4595 1.213 msaitoh * array of ixgbe_set_rxfilter() one by one.
4596 1.99 msaitoh ************************************************************************/
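/*
 * Each call returns the current entry's address and advances *update_ptr
 * by one struct ixgbe_mc_addr, so successive calls from the shared code
 * walk the mta[] array built in ixgbe_set_rxfilter().
 */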
4597 1.98 msaitoh static u8 *
4598 1.98 msaitoh ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4599 1.98 msaitoh {
4600 1.98 msaitoh struct ixgbe_mc_addr *mta;
4601 1.1 dyoung
4602 1.98 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr;
4603 1.98 msaitoh *vmdq = mta->vmdq;
4604 1.33 msaitoh
4605 1.98 msaitoh *update_ptr = (u8*)(mta + 1);
4606 1.99 msaitoh
4607 1.98 msaitoh return (mta->addr);
4608 1.99 msaitoh } /* ixgbe_mc_array_itr */
4609 1.82 msaitoh
4610 1.99 msaitoh /************************************************************************
4611 1.99 msaitoh * ixgbe_local_timer - Timer routine
4612 1.98 msaitoh *
4613 1.99 msaitoh * Checks for link status, updates statistics,
4614 1.99 msaitoh * and runs the watchdog check.
4615 1.99 msaitoh ************************************************************************/
4616 1.98 msaitoh static void
4617 1.98 msaitoh ixgbe_local_timer(void *arg)
4618 1.98 msaitoh {
4619 1.333 msaitoh struct ixgbe_softc *sc = arg;
4620 1.1 dyoung
4621 1.333 msaitoh if (sc->schedule_wqs_ok) {
4622 1.333 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
4623 1.333 msaitoh workqueue_enqueue(sc->timer_wq,
4624 1.333 msaitoh &sc->timer_wc, NULL);
4625 1.233 msaitoh }
4626 1.98 msaitoh }
4627 1.28 msaitoh
4628 1.98 msaitoh static void
4629 1.233 msaitoh ixgbe_handle_timer(struct work *wk, void *context)
4630 1.98 msaitoh {
4631 1.339 msaitoh struct ixgbe_softc *sc = context;
4632 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4633 1.333 msaitoh device_t dev = sc->dev;
4634 1.333 msaitoh struct ix_queue *que = sc->queues;
4635 1.153 msaitoh u64 queues = 0;
4636 1.134 msaitoh u64 v0, v1, v2, v3, v4, v5, v6, v7;
4637 1.153 msaitoh int hung = 0;
4638 1.134 msaitoh int i;
4639 1.1 dyoung
4640 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4641 1.1 dyoung
4642 1.98 msaitoh /* Check for pluggable optics */
4643 1.237 msaitoh if (ixgbe_is_sfp(hw)) {
4644 1.249 msaitoh bool sched_mod_task = false;
4645 1.237 msaitoh
4646 1.249 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4647 1.249 msaitoh /*
4648 1.249 msaitoh * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4649 1.250 msaitoh * any GPIO(SDP). So just schedule TASK_MOD.
4650 1.249 msaitoh */
4651 1.249 msaitoh sched_mod_task = true;
4652 1.249 msaitoh } else {
4653 1.249 msaitoh bool was_full, is_full;
4654 1.249 msaitoh
4655 1.249 msaitoh was_full =
4656 1.249 msaitoh hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4657 1.251 msaitoh is_full = ixgbe_sfp_cage_full(hw);
4658 1.249 msaitoh
4659 1.249 msaitoh /* Do probe if cage state changed */
4660 1.249 msaitoh if (was_full ^ is_full)
4661 1.249 msaitoh sched_mod_task = true;
4662 1.249 msaitoh }
4663 1.249 msaitoh if (sched_mod_task) {
4664 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4665 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4666 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
4667 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4668 1.239 msaitoh }
4669 1.237 msaitoh }
4670 1.1 dyoung
4671 1.333 msaitoh ixgbe_update_link_status(sc);
4672 1.333 msaitoh ixgbe_update_stats_counters(sc);
4673 1.33 msaitoh
4674 1.134 msaitoh /* Update some event counters */
4675 1.134 msaitoh v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4676 1.333 msaitoh que = sc->queues;
4677 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4678 1.186 msaitoh struct tx_ring *txr = que->txr;
4679 1.134 msaitoh
4680 1.134 msaitoh v0 += txr->q_efbig_tx_dma_setup;
4681 1.134 msaitoh v1 += txr->q_mbuf_defrag_failed;
4682 1.134 msaitoh v2 += txr->q_efbig2_tx_dma_setup;
4683 1.134 msaitoh v3 += txr->q_einval_tx_dma_setup;
4684 1.134 msaitoh v4 += txr->q_other_tx_dma_setup;
4685 1.134 msaitoh v5 += txr->q_eagain_tx_dma_setup;
4686 1.134 msaitoh v6 += txr->q_enomem_tx_dma_setup;
4687 1.134 msaitoh v7 += txr->q_tso_err;
4688 1.134 msaitoh }
4689 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
4690 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
4691 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
4692 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
4693 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
4694 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
4695 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
4696 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, v7);
4697 1.134 msaitoh
4698 1.153 msaitoh /*
4699 1.153 msaitoh * Check the TX queues status
4700 1.186 msaitoh * - mark hung queues so we don't schedule on them
4701 1.186 msaitoh * - watchdog only if all queues show hung
4702 1.153 msaitoh */
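	/*
	 * "queues" below collects a bit per queue (1ULL << que->me) that
	 * still has work pending, while sc->active_queues drops a queue's
	 * bit once its busy count reaches IXGBE_QUEUE_HUNG.
	 */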
4703 1.333 msaitoh que = sc->queues;
4704 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4705 1.153 msaitoh /* Keep track of queues with work for soft irq */
4706 1.153 msaitoh if (que->txr->busy)
4707 1.190 msaitoh queues |= 1ULL << que->me;
4708 1.153 msaitoh /*
4709 1.153 msaitoh 		 * Each time txeof runs without cleaning while there
4710 1.153 msaitoh 		 * are uncleaned descriptors, it increments busy. If
4711 1.153 msaitoh 		 * busy reaches the MAX we declare the queue hung.
4712 1.153 msaitoh */
4713 1.153 msaitoh if (que->busy == IXGBE_QUEUE_HUNG) {
4714 1.153 msaitoh ++hung;
4715 1.153 msaitoh /* Mark the queue as inactive */
4716 1.333 msaitoh sc->active_queues &= ~(1ULL << que->me);
4717 1.153 msaitoh continue;
4718 1.153 msaitoh } else {
4719 1.153 msaitoh /* Check if we've come back from hung */
4720 1.333 msaitoh if ((sc->active_queues & (1ULL << que->me)) == 0)
4721 1.333 msaitoh sc->active_queues |= 1ULL << que->me;
4722 1.153 msaitoh }
4723 1.153 msaitoh if (que->busy >= IXGBE_MAX_TX_BUSY) {
4724 1.153 msaitoh device_printf(dev,
4725 1.153 msaitoh "Warning queue %d appears to be hung!\n", i);
4726 1.153 msaitoh que->txr->busy = IXGBE_QUEUE_HUNG;
4727 1.153 msaitoh ++hung;
4728 1.153 msaitoh }
4729 1.150 msaitoh }
4730 1.150 msaitoh
4731 1.232 msaitoh /* Only truly watchdog if all queues show hung */
4732 1.333 msaitoh if (hung == sc->num_queues)
4733 1.153 msaitoh goto watchdog;
4734 1.160 msaitoh #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4735 1.153 msaitoh else if (queues != 0) { /* Force an IRQ on queues with work */
4736 1.333 msaitoh que = sc->queues;
4737 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4738 1.139 knakahar mutex_enter(&que->dc_mtx);
4739 1.153 msaitoh if (que->disabled_count == 0)
4740 1.333 msaitoh ixgbe_rearm_queues(sc,
4741 1.153 msaitoh queues & ((u64)1 << i));
4742 1.139 knakahar mutex_exit(&que->dc_mtx);
4743 1.131 knakahar }
4744 1.98 msaitoh }
4745 1.160 msaitoh #endif
4746 1.150 msaitoh
4747 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4748 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4749 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4750 1.153 msaitoh return;
4751 1.1 dyoung
4752 1.153 msaitoh watchdog:
4753 1.333 msaitoh device_printf(sc->dev, "Watchdog timeout -- resetting\n");
4754 1.333 msaitoh sc->ifp->if_flags &= ~IFF_RUNNING;
4755 1.333 msaitoh IXGBE_EVC_ADD(&sc->watchdog_events, 1);
4756 1.333 msaitoh ixgbe_init_locked(sc);
4757 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4758 1.233 msaitoh } /* ixgbe_handle_timer */
4759 1.43 msaitoh
4760 1.99 msaitoh /************************************************************************
4761 1.169 msaitoh * ixgbe_recovery_mode_timer - Recovery mode timer routine
4762 1.169 msaitoh ************************************************************************/
4763 1.169 msaitoh static void
4764 1.169 msaitoh ixgbe_recovery_mode_timer(void *arg)
4765 1.169 msaitoh {
4766 1.333 msaitoh struct ixgbe_softc *sc = arg;
4767 1.233 msaitoh
4768 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
4769 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode_timer_pending,
4770 1.254 msaitoh 0, 1) == 0) {
4771 1.333 msaitoh workqueue_enqueue(sc->recovery_mode_timer_wq,
4772 1.333 msaitoh &sc->recovery_mode_timer_wc, NULL);
4773 1.254 msaitoh }
4774 1.233 msaitoh }
4775 1.233 msaitoh }
4776 1.233 msaitoh
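/************************************************************************
 * ixgbe_handle_recovery_mode_timer - Recovery mode timer worker
 *
 *   Workqueue handler: checks whether the firmware is in recovery mode,
 *   stops the adapter if so, and re-arms the recovery mode callout.
 ************************************************************************/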
4777 1.233 msaitoh static void
4778 1.233 msaitoh ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4779 1.233 msaitoh {
4780 1.333 msaitoh struct ixgbe_softc *sc = context;
4781 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4782 1.169 msaitoh
4783 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4784 1.169 msaitoh if (ixgbe_fw_recovery_mode(hw)) {
4785 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode, 0, 1) == 0) {
4786 1.169 msaitoh /* Firmware error detected, entering recovery mode */
4787 1.333 msaitoh device_printf(sc->dev,
4788 1.319 msaitoh "Firmware recovery mode detected. Limiting "
4789 1.319 msaitoh "functionality. Refer to the Intel(R) Ethernet "
4790 1.319 msaitoh "Adapters and Devices User Guide for details on "
4791 1.319 msaitoh "firmware recovery mode.\n");
4792 1.169 msaitoh
4793 1.169 msaitoh if (hw->adapter_stopped == FALSE)
4794 1.333 msaitoh ixgbe_stop_locked(sc);
4795 1.169 msaitoh }
4796 1.169 msaitoh } else
4797 1.333 msaitoh atomic_cas_uint(&sc->recovery_mode, 1, 0);
4798 1.169 msaitoh
4799 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
4800 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4801 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4802 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4803 1.233 msaitoh } /* ixgbe_handle_recovery_mode_timer */
4804 1.169 msaitoh
4805 1.169 msaitoh /************************************************************************
4806 1.99 msaitoh * ixgbe_handle_mod - Tasklet for SFP module interrupts
4807 1.273 msaitoh  * bool int_en: true if called while the interrupt is enabled.
4808 1.99 msaitoh ************************************************************************/
4809 1.1 dyoung static void
4810 1.273 msaitoh ixgbe_handle_mod(void *context, bool int_en)
4811 1.1 dyoung {
4812 1.339 msaitoh struct ixgbe_softc *sc = context;
4813 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4814 1.333 msaitoh device_t dev = sc->dev;
4815 1.249 msaitoh enum ixgbe_sfp_type last_sfp_type;
4816 1.251 msaitoh u32 err;
4817 1.249 msaitoh bool last_unsupported_sfp_recovery;
4818 1.98 msaitoh
4819 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4820 1.257 msaitoh
4821 1.249 msaitoh last_sfp_type = hw->phy.sfp_type;
4822 1.249 msaitoh last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4823 1.333 msaitoh IXGBE_EVC_ADD(&sc->mod_workev, 1);
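	/*
	 * With the crosstalk fix enabled, ignore the event unless an SFP+
	 * module is actually present in the cage; the cage check is
	 * skipped on 82598.
	 */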
4824 1.333 msaitoh if (sc->hw.need_crosstalk_fix) {
4825 1.251 msaitoh if ((hw->mac.type != ixgbe_mac_82598EB) &&
4826 1.251 msaitoh !ixgbe_sfp_cage_full(hw))
4827 1.218 msaitoh goto out;
4828 1.98 msaitoh }
4829 1.98 msaitoh
4830 1.98 msaitoh err = hw->phy.ops.identify_sfp(hw);
4831 1.98 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4832 1.249 msaitoh if (last_unsupported_sfp_recovery == false)
4833 1.249 msaitoh device_printf(dev,
4834 1.249 msaitoh "Unsupported SFP+ module type was detected.\n");
4835 1.218 msaitoh goto out;
4836 1.33 msaitoh }
4837 1.33 msaitoh
4838 1.219 msaitoh if (hw->need_unsupported_sfp_recovery) {
4839 1.219 msaitoh device_printf(dev, "Recovering from unsupported SFP\n");
4840 1.219 msaitoh 		/*
4841 1.219 msaitoh 		 * We could recover the status by calling setup_sfp(),
4842 1.219 msaitoh 		 * setup_link() and some others, but that is complex and
4843 1.219 msaitoh 		 * might not work correctly in some unknown cases. To
4844 1.219 msaitoh 		 * avoid that kind of problem, call ixgbe_init_locked().
4845 1.219 msaitoh 		 * It's a simple and safe approach.
4846 1.219 msaitoh 		 */
4847 1.333 msaitoh ixgbe_init_locked(sc);
4848 1.249 msaitoh } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4849 1.249 msaitoh (hw->phy.sfp_type != last_sfp_type)) {
4850 1.249 msaitoh /* A module is inserted and changed. */
4851 1.249 msaitoh
4852 1.219 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4853 1.219 msaitoh err = hw->phy.ops.reset(hw);
4854 1.219 msaitoh else {
4855 1.219 msaitoh err = hw->mac.ops.setup_sfp(hw);
4856 1.219 msaitoh hw->phy.sfp_setup_needed = FALSE;
4857 1.219 msaitoh }
4858 1.219 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4859 1.219 msaitoh device_printf(dev,
4860 1.219 msaitoh "Setup failure - unsupported SFP+ module type.\n");
4861 1.219 msaitoh goto out;
4862 1.219 msaitoh }
4863 1.1 dyoung }
4864 1.233 msaitoh
4865 1.218 msaitoh out:
4866 1.233 msaitoh /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4867 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
4868 1.233 msaitoh
4869 1.233 msaitoh /* Adjust media types shown in ifconfig */
4870 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4871 1.333 msaitoh ifmedia_removeall(&sc->media);
4872 1.333 msaitoh ixgbe_add_media_types(sc);
4873 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
4874 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4875 1.233 msaitoh
4876 1.249 msaitoh /*
4877 1.288 andvar 	 * Don't schedule an MSF event if the chip is an 82598; it doesn't
4878 1.249 msaitoh 	 * support MSF. At least, calling ixgbe_handle_msf on 82598 DA makes
4879 1.250 msaitoh 	 * the link flap because the function calls setup_link().
4880 1.249 msaitoh */
4881 1.260 knakahar if (hw->mac.type != ixgbe_mac_82598EB) {
4882 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4883 1.273 msaitoh if (int_en)
4884 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4885 1.273 msaitoh else
4886 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4887 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4888 1.260 knakahar }
4889 1.249 msaitoh
4890 1.233 msaitoh /*
4891 1.233 msaitoh * Don't call ixgbe_schedule_admin_tasklet() because we are on
4892 1.233 msaitoh * the workqueue now.
4893 1.233 msaitoh */
4894 1.99 msaitoh } /* ixgbe_handle_mod */
4895 1.1 dyoung
4896 1.1 dyoung
4897 1.99 msaitoh /************************************************************************
4898 1.99 msaitoh * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4899 1.99 msaitoh ************************************************************************/
4900 1.33 msaitoh static void
4901 1.233 msaitoh ixgbe_handle_msf(void *context)
4902 1.33 msaitoh {
4903 1.339 msaitoh struct ixgbe_softc *sc = context;
4904 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4905 1.186 msaitoh u32 autoneg;
4906 1.186 msaitoh bool negotiate;
4907 1.33 msaitoh
4908 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4909 1.257 msaitoh
4910 1.333 msaitoh IXGBE_EVC_ADD(&sc->msf_workev, 1);
4911 1.33 msaitoh
4912 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
4913 1.98 msaitoh if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4914 1.98 msaitoh hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4915 1.98 msaitoh if (hw->mac.ops.setup_link)
4916 1.98 msaitoh hw->mac.ops.setup_link(hw, autoneg, TRUE);
4917 1.99 msaitoh } /* ixgbe_handle_msf */
4918 1.33 msaitoh
4919 1.99 msaitoh /************************************************************************
4920 1.99 msaitoh * ixgbe_handle_phy - Tasklet for external PHY interrupts
4921 1.99 msaitoh ************************************************************************/
4922 1.1 dyoung static void
4923 1.98 msaitoh ixgbe_handle_phy(void *context)
4924 1.1 dyoung {
4925 1.339 msaitoh struct ixgbe_softc *sc = context;
4926 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4927 1.98 msaitoh int error;
4928 1.1 dyoung
4929 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4930 1.257 msaitoh
4931 1.333 msaitoh IXGBE_EVC_ADD(&sc->phy_workev, 1);
4932 1.98 msaitoh error = hw->phy.ops.handle_lasi(hw);
4933 1.98 msaitoh if (error == IXGBE_ERR_OVERTEMP)
4934 1.333 msaitoh device_printf(sc->dev,
4935 1.98 msaitoh "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4936 1.98 msaitoh " PHY will downshift to lower power state!\n");
4937 1.98 msaitoh else if (error)
4938 1.333 msaitoh device_printf(sc->dev,
4939 1.99 msaitoh "Error handling LASI interrupt: %d\n", error);
4940 1.99 msaitoh } /* ixgbe_handle_phy */
4941 1.1 dyoung
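/************************************************************************
 * ixgbe_handle_admin - Workqueue handler for admin tasks
 *
 *   Runs the deferred link, SFP module, MSF, PHY and FDIR work requested
 *   by the interrupt handlers, then re-enables the related interrupts.
 ************************************************************************/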
4942 1.98 msaitoh static void
4943 1.233 msaitoh ixgbe_handle_admin(struct work *wk, void *context)
4944 1.233 msaitoh {
4945 1.339 msaitoh struct ixgbe_softc *sc = context;
4946 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4947 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4948 1.260 knakahar u32 task_requests;
4949 1.273 msaitoh u32 eims_enable = 0;
4950 1.260 knakahar
4951 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4952 1.333 msaitoh sc->admin_pending = 0;
4953 1.333 msaitoh task_requests = sc->task_requests;
4954 1.333 msaitoh sc->task_requests = 0;
4955 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4956 1.233 msaitoh
4957 1.233 msaitoh /*
4958 1.233 msaitoh 	 * Hold the IFNET_LOCK across this entire call. This will
4959 1.333 msaitoh 	 * prevent additional changes to sc->phy_layer and serialize
4960 1.233 msaitoh 	 * calls to this tasklet. We cannot hold the CORE_LOCK while
4961 1.233 msaitoh 	 * calling into the ifmedia functions because they call
4962 1.233 msaitoh 	 * ifmedia_lock(), and that lock is the CORE_LOCK.
4963 1.233 msaitoh */
4964 1.233 msaitoh IFNET_LOCK(ifp);
4965 1.333 msaitoh IXGBE_CORE_LOCK(sc);
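	/*
	 * Each task handled below adds its interrupt source to eims_enable;
	 * all of those sources are re-enabled with a single EIMS write after
	 * the pending tasks have been processed.
	 */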
4966 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4967 1.333 msaitoh ixgbe_handle_link(sc);
4968 1.273 msaitoh eims_enable |= IXGBE_EIMS_LSC;
4969 1.273 msaitoh }
4970 1.319 msaitoh if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0)
4971 1.333 msaitoh ixgbe_handle_mod(sc, false);
4972 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4973 1.333 msaitoh ixgbe_handle_mod(sc, true);
4974 1.273 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
4975 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4976 1.273 msaitoh else
4977 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4978 1.260 knakahar }
4979 1.273 msaitoh if ((task_requests
4980 1.273 msaitoh & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4981 1.333 msaitoh ixgbe_handle_msf(sc);
4982 1.273 msaitoh if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4983 1.273 msaitoh (hw->mac.type == ixgbe_mac_82599EB))
4984 1.273 msaitoh eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4985 1.260 knakahar }
4986 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4987 1.333 msaitoh ixgbe_handle_phy(sc);
4988 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4989 1.260 knakahar }
4990 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4991 1.333 msaitoh ixgbe_reinit_fdir(sc);
4992 1.273 msaitoh eims_enable |= IXGBE_EIMS_FLOW_DIR;
4993 1.260 knakahar }
4994 1.233 msaitoh #if 0 /* notyet */
4995 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4996 1.333 msaitoh ixgbe_handle_mbx(sc);
4997 1.273 msaitoh eims_enable |= IXGBE_EIMS_MAILBOX;
4998 1.260 knakahar }
4999 1.233 msaitoh #endif
5000 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
5001 1.233 msaitoh
5002 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
5003 1.233 msaitoh IFNET_UNLOCK(ifp);
5004 1.233 msaitoh } /* ixgbe_handle_admin */
5005 1.233 msaitoh
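/************************************************************************
 * ixgbe_ifstop - if_stop callback
 *
 *   Stops the adapter and waits for the timer workqueue to drain.
 ************************************************************************/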
5006 1.233 msaitoh static void
5007 1.98 msaitoh ixgbe_ifstop(struct ifnet *ifp, int disable)
5008 1.98 msaitoh {
5009 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
5010 1.1 dyoung
5011 1.333 msaitoh IXGBE_CORE_LOCK(sc);
5012 1.333 msaitoh ixgbe_stop_locked(sc);
5013 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
5014 1.223 thorpej
5015 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
5016 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
5017 1.98 msaitoh }
5018 1.1 dyoung
5019 1.99 msaitoh /************************************************************************
5020 1.252 msaitoh * ixgbe_stop_locked - Stop the hardware
5021 1.98 msaitoh *
5022 1.99 msaitoh * Disables all traffic on the adapter by issuing a
5023 1.99 msaitoh * global reset on the MAC and deallocates TX/RX buffers.
5024 1.99 msaitoh ************************************************************************/
5025 1.1 dyoung static void
5026 1.252 msaitoh ixgbe_stop_locked(void *arg)
5027 1.1 dyoung {
5028 1.186 msaitoh struct ifnet *ifp;
5029 1.339 msaitoh struct ixgbe_softc *sc = arg;
5030 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5031 1.99 msaitoh
5032 1.333 msaitoh ifp = sc->ifp;
5033 1.98 msaitoh
5034 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5035 1.98 msaitoh
5036 1.252 msaitoh INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
5037 1.333 msaitoh ixgbe_disable_intr(sc);
5038 1.333 msaitoh callout_stop(&sc->timer);
5039 1.98 msaitoh
5040 1.223 thorpej /* Don't schedule workqueues. */
5041 1.333 msaitoh sc->schedule_wqs_ok = false;
5042 1.223 thorpej
5043 1.98 msaitoh /* Let the stack know...*/
5044 1.98 msaitoh ifp->if_flags &= ~IFF_RUNNING;
5045 1.98 msaitoh
5046 1.98 msaitoh ixgbe_reset_hw(hw);
5047 1.98 msaitoh hw->adapter_stopped = FALSE;
5048 1.98 msaitoh ixgbe_stop_adapter(hw);
5049 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82599EB)
5050 1.98 msaitoh ixgbe_stop_mac_link_on_d3_82599(hw);
5051 1.98 msaitoh /* Turn off the laser - noop with no optics */
5052 1.98 msaitoh ixgbe_disable_tx_laser(hw);
5053 1.1 dyoung
5054 1.98 msaitoh /* Update the stack */
5055 1.333 msaitoh sc->link_up = FALSE;
5056 1.333 msaitoh ixgbe_update_link_status(sc);
5057 1.1 dyoung
5058 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
5059 1.333 msaitoh ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
5060 1.1 dyoung
5061 1.98 msaitoh return;
5062 1.252 msaitoh } /* ixgbe_stop_locked */
5063 1.1 dyoung
5064 1.99 msaitoh /************************************************************************
5065 1.99 msaitoh * ixgbe_update_link_status - Update OS on link state
5066 1.99 msaitoh *
5067 1.99 msaitoh * Note: Only updates the OS on the cached link state.
5068 1.186 msaitoh * The real check of the hardware only happens with
5069 1.186 msaitoh * a link interrupt.
5070 1.99 msaitoh ************************************************************************/
5071 1.98 msaitoh static void
5072 1.333 msaitoh ixgbe_update_link_status(struct ixgbe_softc *sc)
5073 1.1 dyoung {
5074 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5075 1.333 msaitoh device_t dev = sc->dev;
5076 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5077 1.98 msaitoh
5078 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5079 1.136 knakahar
5080 1.333 msaitoh if (sc->link_up) {
5081 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
5082 1.138 knakahar /*
5083 1.138 knakahar * To eliminate influence of the previous state
5084 1.138 knakahar * in the same way as ixgbe_init_locked().
5085 1.138 knakahar */
5086 1.333 msaitoh struct ix_queue *que = sc->queues;
5087 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5088 1.138 knakahar que->eitr_setting = 0;
5089 1.138 knakahar
5090 1.344 msaitoh if (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
5091 1.98 msaitoh /*
5092 1.98 msaitoh 				 * Discard the counts of both MAC Local Fault
5093 1.98 msaitoh 				 * and Remote Fault because those registers
5094 1.98 msaitoh 				 * are valid only while the link is up at
5095 1.98 msaitoh 				 * 10Gbps.
5096 1.98 msaitoh */
5097 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MLFC);
5098 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MRFC);
5099 1.98 msaitoh }
5100 1.98 msaitoh
5101 1.98 msaitoh if (bootverbose) {
5102 1.98 msaitoh const char *bpsmsg;
5103 1.1 dyoung
5104 1.333 msaitoh switch (sc->link_speed) {
5105 1.98 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
5106 1.98 msaitoh bpsmsg = "10 Gbps";
5107 1.98 msaitoh break;
5108 1.98 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
5109 1.98 msaitoh bpsmsg = "5 Gbps";
5110 1.98 msaitoh break;
5111 1.98 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
5112 1.98 msaitoh bpsmsg = "2.5 Gbps";
5113 1.98 msaitoh break;
5114 1.98 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
5115 1.98 msaitoh bpsmsg = "1 Gbps";
5116 1.98 msaitoh break;
5117 1.98 msaitoh case IXGBE_LINK_SPEED_100_FULL:
5118 1.98 msaitoh bpsmsg = "100 Mbps";
5119 1.98 msaitoh break;
5120 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
5121 1.99 msaitoh bpsmsg = "10 Mbps";
5122 1.99 msaitoh break;
5123 1.98 msaitoh default:
5124 1.98 msaitoh bpsmsg = "unknown speed";
5125 1.98 msaitoh break;
5126 1.98 msaitoh }
5127 1.98 msaitoh device_printf(dev, "Link is up %s %s \n",
5128 1.98 msaitoh bpsmsg, "Full Duplex");
5129 1.98 msaitoh }
5130 1.333 msaitoh sc->link_active = LINK_STATE_UP;
5131 1.98 msaitoh /* Update any Flow Control changes */
5132 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5133 1.98 msaitoh /* Update DMA coalescing config */
5134 1.333 msaitoh ixgbe_config_dmac(sc);
5135 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_UP);
5136 1.144 msaitoh
5137 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5138 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5139 1.98 msaitoh }
5140 1.174 msaitoh } else {
5141 1.174 msaitoh /*
5142 1.174 msaitoh * Do it when link active changes to DOWN. i.e.
5143 1.174 msaitoh * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5144 1.186 msaitoh * b) LINK_STATE_UP -> LINK_STATE_DOWN
5145 1.174 msaitoh */
5146 1.333 msaitoh if (sc->link_active != LINK_STATE_DOWN) {
5147 1.98 msaitoh if (bootverbose)
5148 1.98 msaitoh device_printf(dev, "Link is Down\n");
5149 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN);
5150 1.333 msaitoh sc->link_active = LINK_STATE_DOWN;
5151 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5152 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5153 1.333 msaitoh ixgbe_drain_all(sc);
5154 1.98 msaitoh }
5155 1.1 dyoung }
5156 1.99 msaitoh } /* ixgbe_update_link_status */
5157 1.1 dyoung
5158 1.99 msaitoh /************************************************************************
5159 1.99 msaitoh * ixgbe_config_dmac - Configure DMA Coalescing
5160 1.99 msaitoh ************************************************************************/
5161 1.1 dyoung static void
5162 1.333 msaitoh ixgbe_config_dmac(struct ixgbe_softc *sc)
5163 1.1 dyoung {
5164 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5165 1.98 msaitoh struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5166 1.1 dyoung
5167 1.99 msaitoh if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5168 1.98 msaitoh return;
5169 1.65 msaitoh
5170 1.333 msaitoh if (dcfg->watchdog_timer ^ sc->dmac ||
5171 1.333 msaitoh dcfg->link_speed ^ sc->link_speed) {
5172 1.333 msaitoh dcfg->watchdog_timer = sc->dmac;
5173 1.98 msaitoh dcfg->fcoe_en = false;
5174 1.333 msaitoh dcfg->link_speed = sc->link_speed;
5175 1.98 msaitoh dcfg->num_tcs = 1;
5176 1.51 msaitoh
5177 1.98 msaitoh INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5178 1.98 msaitoh dcfg->watchdog_timer, dcfg->link_speed);
5179 1.51 msaitoh
5180 1.98 msaitoh hw->mac.ops.dmac_config(hw);
5181 1.98 msaitoh }
5182 1.99 msaitoh } /* ixgbe_config_dmac */
5183 1.51 msaitoh
5184 1.99 msaitoh /************************************************************************
5185 1.99 msaitoh * ixgbe_enable_intr
5186 1.99 msaitoh ************************************************************************/
5187 1.98 msaitoh static void
5188 1.333 msaitoh ixgbe_enable_intr(struct ixgbe_softc *sc)
5189 1.98 msaitoh {
5190 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5191 1.333 msaitoh struct ix_queue *que = sc->queues;
5192 1.98 msaitoh u32 mask, fwsm;
5193 1.51 msaitoh
5194 1.98 msaitoh mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5195 1.45 msaitoh
5196 1.333 msaitoh switch (sc->hw.mac.type) {
5197 1.99 msaitoh case ixgbe_mac_82599EB:
5198 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5199 1.99 msaitoh /* Temperature sensor on some adapters */
5200 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0;
5201 1.99 msaitoh /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5202 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5203 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP2;
5204 1.99 msaitoh break;
5205 1.99 msaitoh case ixgbe_mac_X540:
5206 1.99 msaitoh /* Detect if Thermal Sensor is enabled */
5207 1.99 msaitoh fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5208 1.99 msaitoh if (fwsm & IXGBE_FWSM_TS_ENABLED)
5209 1.98 msaitoh mask |= IXGBE_EIMS_TS;
5210 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5211 1.99 msaitoh break;
5212 1.99 msaitoh case ixgbe_mac_X550:
5213 1.99 msaitoh /* MAC thermal sensor is automatically enabled */
5214 1.99 msaitoh mask |= IXGBE_EIMS_TS;
5215 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5216 1.99 msaitoh break;
5217 1.99 msaitoh case ixgbe_mac_X550EM_x:
5218 1.99 msaitoh case ixgbe_mac_X550EM_a:
5219 1.99 msaitoh /* Some devices use SDP0 for important information */
5220 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5221 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5222 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5223 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5224 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5225 1.99 msaitoh if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5226 1.99 msaitoh mask |= IXGBE_EICR_GPI_SDP0_X540;
5227 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5228 1.99 msaitoh break;
5229 1.99 msaitoh default:
5230 1.99 msaitoh break;
5231 1.1 dyoung }
5232 1.51 msaitoh
5233 1.99 msaitoh /* Enable Fan Failure detection */
5234 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
5235 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5236 1.99 msaitoh /* Enable SR-IOV */
5237 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5238 1.99 msaitoh mask |= IXGBE_EIMS_MAILBOX;
5239 1.99 msaitoh /* Enable Flow Director */
5240 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FDIR)
5241 1.99 msaitoh mask |= IXGBE_EIMS_FLOW_DIR;
5242 1.99 msaitoh
5243 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5244 1.64 msaitoh
5245 1.98 msaitoh /* With MSI-X we use auto clear */
5246 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) {
5247 1.270 msaitoh /*
5248 1.309 msaitoh 		 * Use auto clear for RTX_QUEUE interrupts only; don't
5249 1.309 msaitoh 		 * auto-clear others (e.g. the link interrupt). Note that
5250 1.270 msaitoh 		 * we don't use the TCP_TIMER interrupt itself.
5251 1.270 msaitoh */
5252 1.270 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5253 1.98 msaitoh }
5254 1.1 dyoung
5255 1.98 msaitoh /*
5256 1.99 msaitoh * Now enable all queues, this is done separately to
5257 1.99 msaitoh * allow for handling the extended (beyond 32) MSI-X
5258 1.99 msaitoh * vectors that can be used by 82599
5259 1.99 msaitoh */
5260 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5261 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
5262 1.1 dyoung
5263 1.98 msaitoh IXGBE_WRITE_FLUSH(hw);
5264 1.43 msaitoh
5265 1.99 msaitoh } /* ixgbe_enable_intr */
5266 1.1 dyoung
5267 1.99 msaitoh /************************************************************************
5268 1.139 knakahar * ixgbe_disable_intr_internal
5269 1.99 msaitoh ************************************************************************/
5270 1.44 msaitoh static void
5271 1.333 msaitoh ixgbe_disable_intr_internal(struct ixgbe_softc *sc, bool nestok)
5272 1.44 msaitoh {
5273 1.333 msaitoh struct ix_queue *que = sc->queues;
5274 1.127 knakahar
5275 1.127 knakahar /* disable interrupts other than queues */
5276 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5277 1.127 knakahar
5278 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0)
5279 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
5280 1.127 knakahar
5281 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5282 1.333 msaitoh ixgbe_disable_queue_internal(sc, que->msix, nestok);
5283 1.127 knakahar
5284 1.333 msaitoh IXGBE_WRITE_FLUSH(&sc->hw);
5285 1.99 msaitoh
5286 1.139 knakahar } /* ixgbe_disable_intr_internal */
5287 1.139 knakahar
5288 1.139 knakahar /************************************************************************
5289 1.139 knakahar * ixgbe_disable_intr
5290 1.139 knakahar ************************************************************************/
5291 1.139 knakahar static void
5292 1.333 msaitoh ixgbe_disable_intr(struct ixgbe_softc *sc)
5293 1.139 knakahar {
5294 1.139 knakahar
5295 1.333 msaitoh ixgbe_disable_intr_internal(sc, true);
5296 1.99 msaitoh } /* ixgbe_disable_intr */
5297 1.98 msaitoh
5298 1.99 msaitoh /************************************************************************
5299 1.139 knakahar * ixgbe_ensure_disabled_intr
5300 1.139 knakahar ************************************************************************/
5301 1.139 knakahar void
5302 1.333 msaitoh ixgbe_ensure_disabled_intr(struct ixgbe_softc *sc)
5303 1.139 knakahar {
5304 1.139 knakahar
5305 1.333 msaitoh ixgbe_disable_intr_internal(sc, false);
5306 1.139 knakahar } /* ixgbe_ensure_disabled_intr */
5307 1.139 knakahar
5308 1.139 knakahar /************************************************************************
5309 1.99 msaitoh * ixgbe_legacy_irq - Legacy Interrupt Service routine
5310 1.99 msaitoh ************************************************************************/
5311 1.98 msaitoh static int
5312 1.98 msaitoh ixgbe_legacy_irq(void *arg)
5313 1.1 dyoung {
5314 1.98 msaitoh struct ix_queue *que = arg;
5315 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
5316 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5317 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5318 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
5319 1.277 msaitoh u32 eicr;
5320 1.269 msaitoh u32 eims_orig;
5321 1.273 msaitoh u32 eims_enable = 0;
5322 1.273 msaitoh u32 eims_disable = 0;
5323 1.98 msaitoh
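	/* Save the current mask so it can be selectively restored below. */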
5324 1.269 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5325 1.269 msaitoh /*
5326 1.269 msaitoh * Silicon errata #26 on 82598. Disable all interrupts before reading
5327 1.269 msaitoh * EICR.
5328 1.269 msaitoh */
5329 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5330 1.98 msaitoh
5331 1.268 msaitoh /* Read and clear EICR */
5332 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5333 1.44 msaitoh
5334 1.99 msaitoh if (eicr == 0) {
5335 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.intzero, 1);
5336 1.269 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5337 1.98 msaitoh return 0;
5338 1.98 msaitoh }
5339 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.legint, 1);
5340 1.44 msaitoh
5341 1.272 msaitoh /* Queue (0) intr */
5342 1.308 msaitoh if (((ifp->if_flags & IFF_RUNNING) != 0) &&
5343 1.308 msaitoh (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5344 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
5345 1.272 msaitoh
5346 1.147 knakahar /*
5347 1.265 msaitoh 		 * Handle "que->txrx_use_workqueue" the same way as
5348 1.265 msaitoh 		 * ixgbe_msix_que() does.
5349 1.147 knakahar */
5350 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
5351 1.147 knakahar
5352 1.98 msaitoh IXGBE_TX_LOCK(txr);
5353 1.98 msaitoh ixgbe_txeof(txr);
5354 1.99 msaitoh #ifdef notyet
5355 1.99 msaitoh if (!ixgbe_ring_empty(ifp, txr->br))
5356 1.99 msaitoh ixgbe_start_locked(ifp, txr);
5357 1.99 msaitoh #endif
5358 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
5359 1.271 msaitoh
5360 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
5361 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
5362 1.273 msaitoh /* Disable queue 0 interrupt */
5363 1.273 msaitoh eims_disable |= 1UL << 0;
5364 1.273 msaitoh } else
5365 1.317 msaitoh eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;
5366 1.44 msaitoh
5367 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
5368 1.233 msaitoh
5369 1.273 msaitoh /* Re-enable some interrupts */
5370 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5371 1.273 msaitoh (eims_orig & ~eims_disable) | eims_enable);
5372 1.99 msaitoh
5373 1.98 msaitoh return 1;
5374 1.99 msaitoh } /* ixgbe_legacy_irq */
5375 1.98 msaitoh
5376 1.99 msaitoh /************************************************************************
5377 1.119 msaitoh * ixgbe_free_pciintr_resources
5378 1.99 msaitoh ************************************************************************/
5379 1.98 msaitoh static void
5380 1.333 msaitoh ixgbe_free_pciintr_resources(struct ixgbe_softc *sc)
5381 1.44 msaitoh {
5382 1.333 msaitoh struct ix_queue *que = sc->queues;
5383 1.98 msaitoh int rid;
5384 1.44 msaitoh
5385 1.98 msaitoh /*
5386 1.99 msaitoh * Release all msix queue resources:
5387 1.99 msaitoh */
5388 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
5389 1.119 msaitoh if (que->res != NULL) {
5390 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[i]);
5391 1.333 msaitoh sc->osdep.ihs[i] = NULL;
5392 1.119 msaitoh }
5393 1.58 msaitoh }
5394 1.58 msaitoh
5395 1.98 msaitoh /* Clean the Legacy or Link interrupt last */
5396 1.333 msaitoh if (sc->vector) /* we are doing MSIX */
5397 1.333 msaitoh rid = sc->vector;
5398 1.98 msaitoh else
5399 1.98 msaitoh rid = 0;
5400 1.44 msaitoh
5401 1.333 msaitoh if (sc->osdep.ihs[rid] != NULL) {
5402 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[rid]);
5403 1.333 msaitoh sc->osdep.ihs[rid] = NULL;
5404 1.98 msaitoh }
5405 1.44 msaitoh
5406 1.333 msaitoh if (sc->osdep.intrs != NULL) {
5407 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
5408 1.333 msaitoh sc->osdep.nintrs);
5409 1.333 msaitoh sc->osdep.intrs = NULL;
5410 1.119 msaitoh }
5411 1.119 msaitoh } /* ixgbe_free_pciintr_resources */
5412 1.119 msaitoh
5413 1.119 msaitoh /************************************************************************
5414 1.119 msaitoh * ixgbe_free_pci_resources
5415 1.119 msaitoh ************************************************************************/
5416 1.119 msaitoh static void
5417 1.333 msaitoh ixgbe_free_pci_resources(struct ixgbe_softc *sc)
5418 1.119 msaitoh {
5419 1.119 msaitoh
5420 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
5421 1.44 msaitoh
5422 1.333 msaitoh if (sc->osdep.mem_size != 0) {
5423 1.333 msaitoh bus_space_unmap(sc->osdep.mem_bus_space_tag,
5424 1.333 msaitoh sc->osdep.mem_bus_space_handle,
5425 1.333 msaitoh sc->osdep.mem_size);
5426 1.44 msaitoh }
5427 1.99 msaitoh } /* ixgbe_free_pci_resources */
5428 1.44 msaitoh
5429 1.99 msaitoh /************************************************************************
5430 1.99 msaitoh * ixgbe_sysctl_flowcntl
5431 1.99 msaitoh *
5432 1.99 msaitoh * SYSCTL wrapper around setting Flow Control
5433 1.99 msaitoh ************************************************************************/
5434 1.98 msaitoh static int
5435 1.98 msaitoh ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5436 1.98 msaitoh {
5437 1.98 msaitoh struct sysctlnode node = *rnode;
5438 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5439 1.99 msaitoh int error, fc;
5440 1.82 msaitoh
5441 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5442 1.169 msaitoh return (EPERM);
5443 1.169 msaitoh
5444 1.333 msaitoh fc = sc->hw.fc.current_mode;
5445 1.98 msaitoh node.sysctl_data = &fc;
5446 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5447 1.98 msaitoh if (error != 0 || newp == NULL)
5448 1.98 msaitoh return error;
5449 1.82 msaitoh
5450 1.98 msaitoh /* Don't bother if it's not changed */
5451 1.333 msaitoh if (fc == sc->hw.fc.current_mode)
5452 1.98 msaitoh return (0);
5453 1.83 msaitoh
5454 1.333 msaitoh return ixgbe_set_flowcntl(sc, fc);
5455 1.99 msaitoh } /* ixgbe_sysctl_flowcntl */
5456 1.1 dyoung
5457 1.99 msaitoh /************************************************************************
5458 1.99 msaitoh * ixgbe_set_flowcntl - Set flow control
5459 1.99 msaitoh *
5460 1.99 msaitoh * Flow control values:
5461 1.99 msaitoh * 0 - off
5462 1.99 msaitoh * 1 - rx pause
5463 1.99 msaitoh * 2 - tx pause
5464 1.99 msaitoh * 3 - full
5465 1.99 msaitoh ************************************************************************/
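/*
 * Illustrative example (not from the original sources): writing 3 to the
 * sysctl requests ixgbe_fc_full; on a multiqueue adapter this also turns
 * off per-queue RX drop, while writing 0 (ixgbe_fc_none) re-enables it.
 */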
5466 1.98 msaitoh static int
5467 1.333 msaitoh ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
5468 1.98 msaitoh {
5469 1.98 msaitoh switch (fc) {
5470 1.98 msaitoh case ixgbe_fc_rx_pause:
5471 1.98 msaitoh case ixgbe_fc_tx_pause:
5472 1.98 msaitoh case ixgbe_fc_full:
5473 1.333 msaitoh sc->hw.fc.requested_mode = fc;
5474 1.333 msaitoh if (sc->num_queues > 1)
5475 1.333 msaitoh ixgbe_disable_rx_drop(sc);
5476 1.98 msaitoh break;
5477 1.98 msaitoh case ixgbe_fc_none:
5478 1.333 msaitoh sc->hw.fc.requested_mode = ixgbe_fc_none;
5479 1.333 msaitoh if (sc->num_queues > 1)
5480 1.333 msaitoh ixgbe_enable_rx_drop(sc);
5481 1.98 msaitoh break;
5482 1.98 msaitoh default:
5483 1.98 msaitoh return (EINVAL);
5484 1.1 dyoung }
5485 1.99 msaitoh
5486 1.98 msaitoh #if 0 /* XXX NetBSD */
5487 1.98 msaitoh /* Don't autoneg if forcing a value */
5488 1.333 msaitoh sc->hw.fc.disable_fc_autoneg = TRUE;
5489 1.98 msaitoh #endif
5490 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5491 1.99 msaitoh
5492 1.98 msaitoh return (0);
5493 1.99 msaitoh } /* ixgbe_set_flowcntl */
5494 1.1 dyoung
5495 1.99 msaitoh /************************************************************************
5496 1.99 msaitoh * ixgbe_enable_rx_drop
5497 1.99 msaitoh *
5498 1.99 msaitoh * Enable the hardware to drop packets when the buffer is
5499 1.99 msaitoh * full. This is useful with multiqueue, so that no single
5500 1.99 msaitoh * queue being full stalls the entire RX engine. We only
5501 1.99 msaitoh * enable this when Multiqueue is enabled AND Flow Control
5502 1.99 msaitoh * is disabled.
5503 1.99 msaitoh ************************************************************************/
5504 1.98 msaitoh static void
5505 1.333 msaitoh ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5506 1.98 msaitoh {
5507 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5508 1.186 msaitoh struct rx_ring *rxr;
5509 1.186 msaitoh u32 srrctl;
5510 1.1 dyoung
5511 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5512 1.333 msaitoh rxr = &sc->rx_rings[i];
5513 1.99 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5514 1.99 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
5515 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5516 1.98 msaitoh }
5517 1.99 msaitoh
5518 1.98 msaitoh /* enable drop for each vf */
5519 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5520 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5521 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5522 1.98 msaitoh IXGBE_QDE_ENABLE));
5523 1.98 msaitoh }
5524 1.99 msaitoh } /* ixgbe_enable_rx_drop */
5525 1.43 msaitoh
5526 1.99 msaitoh /************************************************************************
5527 1.99 msaitoh * ixgbe_disable_rx_drop
5528 1.99 msaitoh ************************************************************************/
5529 1.98 msaitoh static void
5530 1.333 msaitoh ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5531 1.98 msaitoh {
5532 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5533 1.186 msaitoh struct rx_ring *rxr;
5534 1.186 msaitoh u32 srrctl;
5535 1.43 msaitoh
5536 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5537 1.333 msaitoh rxr = &sc->rx_rings[i];
5538 1.186 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5539 1.186 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5540 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5541 1.98 msaitoh }
5542 1.99 msaitoh
5543 1.98 msaitoh /* disable drop for each vf */
5544 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5545 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5546 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5547 1.1 dyoung }
5548 1.99 msaitoh } /* ixgbe_disable_rx_drop */
5549 1.98 msaitoh
5550 1.99 msaitoh /************************************************************************
5551 1.99 msaitoh * ixgbe_sysctl_advertise
5552 1.99 msaitoh *
5553 1.99 msaitoh * SYSCTL wrapper around setting advertised speed
5554 1.99 msaitoh ************************************************************************/
5555 1.98 msaitoh static int
5556 1.98 msaitoh ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5557 1.98 msaitoh {
5558 1.99 msaitoh struct sysctlnode node = *rnode;
5559 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5560 1.186 msaitoh int error = 0, advertise;
5561 1.1 dyoung
5562 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5563 1.169 msaitoh return (EPERM);
5564 1.169 msaitoh
5565 1.333 msaitoh advertise = sc->advertise;
5566 1.98 msaitoh node.sysctl_data = &advertise;
5567 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5568 1.98 msaitoh if (error != 0 || newp == NULL)
5569 1.98 msaitoh return error;
5570 1.28 msaitoh
5571 1.333 msaitoh return ixgbe_set_advertise(sc, advertise);
5572 1.99 msaitoh } /* ixgbe_sysctl_advertise */
5573 1.1 dyoung
5574 1.99 msaitoh /************************************************************************
5575 1.99 msaitoh * ixgbe_set_advertise - Control advertised link speed
5576 1.99 msaitoh *
5577 1.99 msaitoh * Flags:
5578 1.103 msaitoh * 0x00 - Default (all capable link speed)
5579 1.296 msaitoh * 0x1 - advertise 100 Mb
5580 1.296 msaitoh * 0x2 - advertise 1G
5581 1.296 msaitoh * 0x4 - advertise 10G
5582 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5583 1.103 msaitoh * 0x10 - advertise 2.5G
5584 1.103 msaitoh * 0x20 - advertise 5G
5585 1.99 msaitoh ************************************************************************/
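/*
 * Illustrative example (not from the original sources): advertise = 0x6
 * (0x2 | 0x4) requests 1G and 10G, so "speed" below becomes
 * IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL, assuming the
 * hardware reports both in link_caps; advertise = 0 falls back to
 * everything in link_caps.
 */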
5586 1.98 msaitoh static int
5587 1.333 msaitoh ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5588 1.1 dyoung {
5589 1.186 msaitoh device_t dev;
5590 1.186 msaitoh struct ixgbe_hw *hw;
5591 1.99 msaitoh ixgbe_link_speed speed = 0;
5592 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5593 1.186 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
5594 1.186 msaitoh bool negotiate = FALSE;
5595 1.98 msaitoh
5596 1.98 msaitoh /* Checks to validate new value */
5597 1.333 msaitoh if (sc->advertise == advertise) /* no change */
5598 1.98 msaitoh return (0);
5599 1.98 msaitoh
5600 1.333 msaitoh dev = sc->dev;
5601 1.333 msaitoh hw = &sc->hw;
5602 1.98 msaitoh
5603 1.98 msaitoh /* No speed changes for backplane media */
5604 1.98 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
5605 1.98 msaitoh return (ENODEV);
5606 1.98 msaitoh
5607 1.98 msaitoh if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5608 1.98 msaitoh (hw->phy.multispeed_fiber))) {
5609 1.98 msaitoh device_printf(dev,
5610 1.98 msaitoh "Advertised speed can only be set on copper or "
5611 1.98 msaitoh "multispeed fiber media types.\n");
5612 1.98 msaitoh return (EINVAL);
5613 1.98 msaitoh }
5614 1.98 msaitoh
5615 1.259 msaitoh if (advertise < 0x0 || advertise > 0x3f) {
5616 1.319 msaitoh device_printf(dev, "Invalid advertised speed; "
5617 1.319 msaitoh "valid modes are 0x0 through 0x3f\n");
5618 1.98 msaitoh return (EINVAL);
5619 1.98 msaitoh }
5620 1.1 dyoung
5621 1.99 msaitoh if (hw->mac.ops.get_link_capabilities) {
5622 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5623 1.99 msaitoh &negotiate);
5624 1.99 msaitoh if (err != IXGBE_SUCCESS) {
5625 1.319 msaitoh device_printf(dev, "Unable to determine supported "
5626 1.319 msaitoh "advertise speeds\n");
5627 1.99 msaitoh return (ENODEV);
5628 1.99 msaitoh }
5629 1.99 msaitoh }
5630 1.99 msaitoh
5631 1.98 msaitoh /* Set new value and report new advertised mode */
5632 1.99 msaitoh if (advertise & 0x1) {
5633 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5634 1.319 msaitoh device_printf(dev, "Interface does not support 100Mb "
5635 1.319 msaitoh "advertised speed\n");
5636 1.98 msaitoh return (EINVAL);
5637 1.98 msaitoh }
5638 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
5639 1.99 msaitoh }
5640 1.99 msaitoh if (advertise & 0x2) {
5641 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5642 1.319 msaitoh device_printf(dev, "Interface does not support 1Gb "
5643 1.319 msaitoh "advertised speed\n");
5644 1.99 msaitoh return (EINVAL);
5645 1.99 msaitoh }
5646 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
5647 1.99 msaitoh }
5648 1.99 msaitoh if (advertise & 0x4) {
5649 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5650 1.319 msaitoh device_printf(dev, "Interface does not support 10Gb "
5651 1.319 msaitoh "advertised speed\n");
5652 1.99 msaitoh return (EINVAL);
5653 1.99 msaitoh }
5654 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
5655 1.99 msaitoh }
5656 1.99 msaitoh if (advertise & 0x8) {
5657 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5658 1.319 msaitoh device_printf(dev, "Interface does not support 10Mb "
5659 1.319 msaitoh "advertised speed\n");
5660 1.99 msaitoh return (EINVAL);
5661 1.99 msaitoh }
5662 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
5663 1.99 msaitoh }
5664 1.103 msaitoh if (advertise & 0x10) {
5665 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5666 1.319 msaitoh device_printf(dev, "Interface does not support 2.5Gb "
5667 1.319 msaitoh "advertised speed\n");
5668 1.103 msaitoh return (EINVAL);
5669 1.103 msaitoh }
5670 1.103 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5671 1.103 msaitoh }
5672 1.103 msaitoh if (advertise & 0x20) {
5673 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5674 1.319 msaitoh device_printf(dev, "Interface does not support 5Gb "
5675 1.319 msaitoh "advertised speed\n");
5676 1.103 msaitoh return (EINVAL);
5677 1.103 msaitoh }
5678 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
5679 1.103 msaitoh }
5680 1.99 msaitoh if (advertise == 0)
5681 1.99 msaitoh speed = link_caps; /* All capable link speed */
5682 1.1 dyoung
5683 1.98 msaitoh hw->mac.autotry_restart = TRUE;
5684 1.98 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
5685 1.333 msaitoh sc->advertise = advertise;
5686 1.1 dyoung
5687 1.99 msaitoh return (0);
5688 1.99 msaitoh } /* ixgbe_set_advertise */
5689 1.1 dyoung
5690 1.99 msaitoh /************************************************************************
5691 1.296 msaitoh * ixgbe_get_default_advertise - Get default advertised speed settings
5692 1.99 msaitoh *
5693 1.99 msaitoh * Formatted for sysctl usage.
5694 1.99 msaitoh * Flags:
5695 1.296 msaitoh * 0x1 - advertise 100 Mb
5696 1.296 msaitoh * 0x2 - advertise 1G
5697 1.296 msaitoh * 0x4 - advertise 10G
5698 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5699 1.103 msaitoh * 0x10 - advertise 2.5G
5700 1.103 msaitoh * 0x20 - advertise 5G
5701 1.99 msaitoh ************************************************************************/
5702 1.98 msaitoh static int
5703 1.333 msaitoh ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5704 1.1 dyoung {
5705 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5706 1.186 msaitoh int speed;
5707 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5708 1.186 msaitoh s32 err;
5709 1.186 msaitoh bool negotiate = FALSE;
5710 1.98 msaitoh
5711 1.99 msaitoh /*
5712 1.99 msaitoh * Advertised speed means nothing unless it's copper or
5713 1.99 msaitoh * multi-speed fiber
5714 1.99 msaitoh */
5715 1.99 msaitoh if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5716 1.99 msaitoh !(hw->phy.multispeed_fiber))
5717 1.99 msaitoh return (0);
5718 1.1 dyoung
5719 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5720 1.99 msaitoh if (err != IXGBE_SUCCESS)
5721 1.99 msaitoh return (0);
5722 1.1 dyoung
5723 1.99 msaitoh speed =
5724 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5725 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5726 1.103 msaitoh ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5727 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5728 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5729 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5730 1.99 msaitoh
5731 1.99 msaitoh return speed;
5732 1.296 msaitoh } /* ixgbe_get_default_advertise */
5733 1.99 msaitoh
5734 1.99 msaitoh /************************************************************************
5735 1.99 msaitoh * ixgbe_sysctl_dmac - Manage DMA Coalescing
5736 1.99 msaitoh *
5737 1.99 msaitoh * Control values:
5738 1.99 msaitoh * 0/1 - off / on (use default value of 1000)
5739 1.99 msaitoh *
5740 1.99 msaitoh * Legal timer values are:
5741 1.99 msaitoh * 50,100,250,500,1000,2000,5000,10000
5742 1.99 msaitoh *
5743 1.99 msaitoh * Turning off interrupt moderation will also turn this off.
5744 1.99 msaitoh ************************************************************************/
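/*
 * Illustrative example (not from the original sources): writing 1 enables
 * DMA coalescing with the default watchdog value of 1000, writing 250
 * selects 250 explicitly, and any value not listed above returns EINVAL.
 */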
5745 1.1 dyoung static int
5746 1.98 msaitoh ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5747 1.1 dyoung {
5748 1.44 msaitoh struct sysctlnode node = *rnode;
5749 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5750 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5751 1.186 msaitoh int error;
5752 1.186 msaitoh int newval;
5753 1.1 dyoung
5754 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5755 1.169 msaitoh return (EPERM);
5756 1.169 msaitoh
5757 1.333 msaitoh newval = sc->dmac;
5758 1.98 msaitoh node.sysctl_data = &newval;
5759 1.22 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5760 1.98 msaitoh if ((error) || (newp == NULL))
5761 1.98 msaitoh return (error);
5762 1.98 msaitoh
5763 1.98 msaitoh switch (newval) {
5764 1.98 msaitoh case 0:
5765 1.98 msaitoh /* Disabled */
5766 1.333 msaitoh sc->dmac = 0;
5767 1.98 msaitoh break;
5768 1.98 msaitoh case 1:
5769 1.98 msaitoh /* Enable and use default */
5770 1.333 msaitoh sc->dmac = 1000;
5771 1.98 msaitoh break;
5772 1.98 msaitoh case 50:
5773 1.98 msaitoh case 100:
5774 1.98 msaitoh case 250:
5775 1.98 msaitoh case 500:
5776 1.98 msaitoh case 1000:
5777 1.98 msaitoh case 2000:
5778 1.98 msaitoh case 5000:
5779 1.98 msaitoh case 10000:
5780 1.98 msaitoh /* Legal values - allow */
5781 1.333 msaitoh sc->dmac = newval;
5782 1.98 msaitoh break;
5783 1.98 msaitoh default:
5784 1.98 msaitoh /* Do nothing, illegal value */
5785 1.98 msaitoh return (EINVAL);
5786 1.22 msaitoh }
5787 1.1 dyoung
5788 1.98 msaitoh /* Re-initialize hardware if it's already running */
5789 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING)
5790 1.302 riastrad if_init(ifp);
5791 1.1 dyoung
5792 1.98 msaitoh return (0);
5793 1.1 dyoung }
5794 1.1 dyoung
5795 1.98 msaitoh #ifdef IXGBE_DEBUG
5796 1.99 msaitoh /************************************************************************
5797 1.99 msaitoh * ixgbe_sysctl_power_state
5798 1.99 msaitoh *
5799 1.99 msaitoh * Sysctl to test power states
5800 1.99 msaitoh * Values:
5801 1.99 msaitoh * 0 - set device to D0
5802 1.99 msaitoh * 3 - set device to D3
5803 1.99 msaitoh * (none) - get current device power state
5804 1.99 msaitoh ************************************************************************/
5805 1.98 msaitoh static int
5806 1.98 msaitoh ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5807 1.44 msaitoh {
5808 1.99 msaitoh #ifdef notyet
5809 1.98 msaitoh struct sysctlnode node = *rnode;
5810 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5811 1.333 msaitoh device_t dev = sc->dev;
5812 1.186 msaitoh int curr_ps, new_ps, error = 0;
5813 1.44 msaitoh
5814 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5815 1.169 msaitoh return (EPERM);
5816 1.169 msaitoh
5817 1.98 msaitoh curr_ps = new_ps = pci_get_powerstate(dev);
5818 1.44 msaitoh
5819 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5820 1.98 msaitoh if ((error) || (req->newp == NULL))
5821 1.98 msaitoh return (error);
5822 1.44 msaitoh
5823 1.98 msaitoh if (new_ps == curr_ps)
5824 1.98 msaitoh return (0);
5825 1.44 msaitoh
5826 1.98 msaitoh if (new_ps == 3 && curr_ps == 0)
5827 1.98 msaitoh error = DEVICE_SUSPEND(dev);
5828 1.98 msaitoh else if (new_ps == 0 && curr_ps == 3)
5829 1.98 msaitoh error = DEVICE_RESUME(dev);
5830 1.98 msaitoh else
5831 1.98 msaitoh return (EINVAL);
5832 1.44 msaitoh
5833 1.98 msaitoh device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5834 1.44 msaitoh
5835 1.98 msaitoh return (error);
5836 1.98 msaitoh #else
5837 1.98 msaitoh return 0;
5838 1.98 msaitoh #endif
5839 1.99 msaitoh } /* ixgbe_sysctl_power_state */
5840 1.98 msaitoh #endif
5841 1.99 msaitoh
5842 1.99 msaitoh /************************************************************************
5843 1.99 msaitoh * ixgbe_sysctl_wol_enable
5844 1.99 msaitoh *
5845 1.99 msaitoh * Sysctl to enable/disable the WoL capability,
5846 1.99 msaitoh * if supported by the adapter.
5847 1.99 msaitoh *
5848 1.99 msaitoh * Values:
5849 1.99 msaitoh * 0 - disabled
5850 1.99 msaitoh * 1 - enabled
5851 1.99 msaitoh ************************************************************************/
5852 1.98 msaitoh static int
5853 1.98 msaitoh ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5854 1.98 msaitoh {
5855 1.98 msaitoh struct sysctlnode node = *rnode;
5856 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5857 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5858 1.186 msaitoh bool new_wol_enabled;
5859 1.186 msaitoh int error = 0;
5860 1.44 msaitoh
5861 1.169 msaitoh /*
5862 1.169 msaitoh * It's not required to check recovery mode because this function never
5863 1.169 msaitoh * touches hardware.
5864 1.169 msaitoh */
5865 1.98 msaitoh new_wol_enabled = hw->wol_enabled;
5866 1.98 msaitoh node.sysctl_data = &new_wol_enabled;
5867 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5868 1.98 msaitoh if ((error) || (newp == NULL))
5869 1.98 msaitoh return (error);
5870 1.98 msaitoh if (new_wol_enabled == hw->wol_enabled)
5871 1.98 msaitoh return (0);
5872 1.44 msaitoh
5873 1.333 msaitoh if (new_wol_enabled && !sc->wol_support)
5874 1.98 msaitoh return (ENODEV);
5875 1.98 msaitoh else
5876 1.98 msaitoh hw->wol_enabled = new_wol_enabled;
5877 1.44 msaitoh
5878 1.98 msaitoh return (0);
5879 1.99 msaitoh } /* ixgbe_sysctl_wol_enable */
5880 1.48 msaitoh
5881 1.99 msaitoh /************************************************************************
5882 1.99 msaitoh * ixgbe_sysctl_wufc - Wake Up Filter Control
5883 1.99 msaitoh *
5884 1.99 msaitoh * Sysctl to enable/disable the types of packets that the
5885 1.99 msaitoh * adapter will wake up on upon receipt.
5886 1.99 msaitoh * Flags:
5887 1.99 msaitoh * 0x1 - Link Status Change
5888 1.99 msaitoh * 0x2 - Magic Packet
5889 1.99 msaitoh * 0x4 - Direct Exact
5890 1.99 msaitoh * 0x8 - Directed Multicast
5891 1.99 msaitoh * 0x10 - Broadcast
5892 1.99 msaitoh * 0x20 - ARP/IPv4 Request Packet
5893 1.99 msaitoh * 0x40 - Direct IPv4 Packet
5894 1.99 msaitoh * 0x80 - Direct IPv6 Packet
5895 1.98 msaitoh *
5896 1.99 msaitoh * Settings not listed above will cause the sysctl to return an error.
5897 1.99 msaitoh ************************************************************************/
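/*
 * Illustrative example (not from the original sources): wufc = 0x12
 * (0x2 | 0x10) selects wake on Magic Packet and Broadcast frames.
 */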
5898 1.1 dyoung static int
5899 1.98 msaitoh ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5900 1.1 dyoung {
5901 1.98 msaitoh struct sysctlnode node = *rnode;
5902 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5903 1.98 msaitoh int error = 0;
5904 1.98 msaitoh u32 new_wufc;
5905 1.52 msaitoh
5906 1.169 msaitoh /*
5907 1.169 msaitoh * It's not required to check recovery mode because this function never
5908 1.169 msaitoh * touches hardware.
5909 1.169 msaitoh */
5910 1.333 msaitoh new_wufc = sc->wufc;
5911 1.98 msaitoh node.sysctl_data = &new_wufc;
5912 1.52 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5913 1.98 msaitoh if ((error) || (newp == NULL))
5914 1.98 msaitoh return (error);
5915 1.333 msaitoh if (new_wufc == sc->wufc)
5916 1.98 msaitoh return (0);
5917 1.98 msaitoh
5918 1.98 msaitoh if (new_wufc & 0xffffff00)
5919 1.98 msaitoh return (EINVAL);
5920 1.99 msaitoh
5921 1.99 msaitoh new_wufc &= 0xff;
5922 1.333 msaitoh new_wufc |= (0xffffff & sc->wufc);
5923 1.333 msaitoh sc->wufc = new_wufc;
5924 1.52 msaitoh
5925 1.98 msaitoh return (0);
5926 1.99 msaitoh } /* ixgbe_sysctl_wufc */
5927 1.52 msaitoh
5928 1.98 msaitoh #ifdef IXGBE_DEBUG
5929 1.99 msaitoh /************************************************************************
5930 1.99 msaitoh * ixgbe_sysctl_print_rss_config
5931 1.99 msaitoh ************************************************************************/
5932 1.52 msaitoh static int
5933 1.98 msaitoh ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5934 1.52 msaitoh {
5935 1.99 msaitoh #ifdef notyet
5936 1.99 msaitoh struct sysctlnode node = *rnode;
5937 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5938 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5939 1.333 msaitoh device_t dev = sc->dev;
5940 1.186 msaitoh struct sbuf *buf;
5941 1.186 msaitoh int error = 0, reta_size;
5942 1.186 msaitoh u32 reg;
5943 1.1 dyoung
5944 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5945 1.169 msaitoh return (EPERM);
5946 1.169 msaitoh
5947 1.98 msaitoh buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5948 1.98 msaitoh if (!buf) {
5949 1.98 msaitoh device_printf(dev, "Could not allocate sbuf for output.\n");
5950 1.98 msaitoh return (ENOMEM);
5951 1.98 msaitoh }
5952 1.52 msaitoh
5953 1.98 msaitoh // TODO: use sbufs to make a string to print out
5954 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
5955 1.333 msaitoh switch (sc->hw.mac.type) {
5956 1.98 msaitoh case ixgbe_mac_X550:
5957 1.98 msaitoh case ixgbe_mac_X550EM_x:
5958 1.99 msaitoh case ixgbe_mac_X550EM_a:
5959 1.98 msaitoh reta_size = 128;
5960 1.98 msaitoh break;
5961 1.98 msaitoh default:
5962 1.98 msaitoh reta_size = 32;
5963 1.98 msaitoh break;
5964 1.43 msaitoh }
5965 1.1 dyoung
5966 1.98 msaitoh /* Print out the redirection table */
5967 1.98 msaitoh sbuf_cat(buf, "\n");
5968 1.98 msaitoh for (int i = 0; i < reta_size; i++) {
5969 1.98 msaitoh if (i < 32) {
5970 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5971 1.98 msaitoh sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5972 1.98 msaitoh } else {
5973 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5974 1.98 msaitoh sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5975 1.98 msaitoh }
5976 1.28 msaitoh }
5977 1.1 dyoung
5978 1.98 msaitoh // TODO: print more config
5979 1.43 msaitoh
5980 1.98 msaitoh error = sbuf_finish(buf);
5981 1.98 msaitoh if (error)
5982 1.98 msaitoh device_printf(dev, "Error finishing sbuf: %d\n", error);
5983 1.1 dyoung
5984 1.98 msaitoh sbuf_delete(buf);
5985 1.99 msaitoh #endif
5986 1.98 msaitoh return (0);
5987 1.99 msaitoh } /* ixgbe_sysctl_print_rss_config */
5988 1.98 msaitoh #endif /* IXGBE_DEBUG */
5989 1.24 msaitoh
5990 1.99 msaitoh /************************************************************************
5991 1.99 msaitoh * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5992 1.99 msaitoh *
5993 1.99 msaitoh * For X552/X557-AT devices using an external PHY
5994 1.99 msaitoh ************************************************************************/
5995 1.44 msaitoh static int
5996 1.44 msaitoh ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5997 1.44 msaitoh {
5998 1.44 msaitoh struct sysctlnode node = *rnode;
5999 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6000 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6001 1.44 msaitoh int val;
6002 1.44 msaitoh u16 reg;
6003 1.44 msaitoh int error;
6004 1.44 msaitoh
6005 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6006 1.169 msaitoh return (EPERM);
6007 1.169 msaitoh
6008 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6009 1.325 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6010 1.333 msaitoh device_printf(sc->dev,
6011 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6012 1.44 msaitoh return (ENODEV);
6013 1.44 msaitoh }
6014 1.44 msaitoh
6015 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
6016 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6017 1.333 msaitoh device_printf(sc->dev,
6018 1.44 msaitoh "Error reading from PHY's current temperature register\n");
6019 1.44 msaitoh return (EAGAIN);
6020 1.44 msaitoh }
6021 1.44 msaitoh
6022 1.44 msaitoh node.sysctl_data = &val;
6023 1.44 msaitoh
6024 1.44 msaitoh /* Shift temp for output */
6025 1.44 msaitoh val = reg >> 8;
6026 1.44 msaitoh
6027 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6028 1.44 msaitoh if ((error) || (newp == NULL))
6029 1.44 msaitoh return (error);
6030 1.44 msaitoh
6031 1.44 msaitoh return (0);
6032 1.99 msaitoh } /* ixgbe_sysctl_phy_temp */
6033 1.44 msaitoh
6034 1.99 msaitoh /************************************************************************
6035 1.99 msaitoh * ixgbe_sysctl_phy_overtemp_occurred
6036 1.99 msaitoh *
6037 1.99 msaitoh * Reports (directly from the PHY) whether the current PHY
6038 1.99 msaitoh * temperature is over the overtemp threshold.
6039 1.99 msaitoh ************************************************************************/
6040 1.44 msaitoh static int
6041 1.44 msaitoh ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
6042 1.44 msaitoh {
6043 1.44 msaitoh struct sysctlnode node = *rnode;
6044 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6045 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6046 1.44 msaitoh int val, error;
6047 1.44 msaitoh u16 reg;
6048 1.44 msaitoh
6049 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6050 1.169 msaitoh return (EPERM);
6051 1.169 msaitoh
6052 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6053 1.344 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6054 1.333 msaitoh device_printf(sc->dev,
6055 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6056 1.44 msaitoh return (ENODEV);
6057 1.44 msaitoh }
6058 1.44 msaitoh
6059 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
6060 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6061 1.333 msaitoh device_printf(sc->dev,
6062 1.44 msaitoh "Error reading from PHY's temperature status register\n");
6063 1.44 msaitoh return (EAGAIN);
6064 1.44 msaitoh }
6065 1.44 msaitoh
6066 1.44 msaitoh node.sysctl_data = &val;
6067 1.44 msaitoh
6068 1.44 msaitoh /* Get occurrence bit */
6069 1.44 msaitoh val = !!(reg & 0x4000);
6070 1.44 msaitoh
6071 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6072 1.44 msaitoh if ((error) || (newp == NULL))
6073 1.44 msaitoh return (error);
6074 1.44 msaitoh
6075 1.44 msaitoh return (0);
6076 1.99 msaitoh } /* ixgbe_sysctl_phy_overtemp_occurred */
6077 1.99 msaitoh
6078 1.99 msaitoh /************************************************************************
6079 1.99 msaitoh * ixgbe_sysctl_eee_state
6080 1.99 msaitoh *
6081 1.99 msaitoh * Sysctl to set EEE power saving feature
6082 1.99 msaitoh * Values:
6083 1.99 msaitoh * 0 - disable EEE
6084 1.99 msaitoh * 1 - enable EEE
6085 1.99 msaitoh * (none) - get current device EEE state
6086 1.99 msaitoh ************************************************************************/
6087 1.99 msaitoh static int
6088 1.99 msaitoh ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6089 1.99 msaitoh {
6090 1.99 msaitoh struct sysctlnode node = *rnode;
6091 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6092 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6093 1.333 msaitoh device_t dev = sc->dev;
6094 1.186 msaitoh int curr_eee, new_eee, error = 0;
6095 1.186 msaitoh s32 retval;
6096 1.99 msaitoh
6097 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6098 1.169 msaitoh return (EPERM);
6099 1.169 msaitoh
6100 1.333 msaitoh curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
6101 1.99 msaitoh node.sysctl_data = &new_eee;
6102 1.99 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6103 1.99 msaitoh if ((error) || (newp == NULL))
6104 1.99 msaitoh return (error);
6105 1.99 msaitoh
6106 1.99 msaitoh /* Nothing to do */
6107 1.99 msaitoh if (new_eee == curr_eee)
6108 1.99 msaitoh return (0);
6109 1.99 msaitoh
6110 1.99 msaitoh /* Not supported */
6111 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
6112 1.99 msaitoh return (EINVAL);
6113 1.99 msaitoh
6114 1.99 msaitoh /* Bounds checking */
6115 1.99 msaitoh if ((new_eee < 0) || (new_eee > 1))
6116 1.99 msaitoh return (EINVAL);
6117 1.99 msaitoh
6118 1.333 msaitoh retval = ixgbe_setup_eee(&sc->hw, new_eee);
6119 1.99 msaitoh if (retval) {
6120 1.99 msaitoh device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6121 1.99 msaitoh return (EINVAL);
6122 1.99 msaitoh }
6123 1.99 msaitoh
6124 1.99 msaitoh /* Restart auto-neg */
6125 1.302 riastrad if_init(ifp);
6126 1.99 msaitoh
6127 1.99 msaitoh device_printf(dev, "New EEE state: %d\n", new_eee);
6128 1.99 msaitoh
6129 1.99 msaitoh /* Cache new value */
6130 1.99 msaitoh if (new_eee)
6131 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6132 1.99 msaitoh else
6133 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_EEE;
6134 1.99 msaitoh
6135 1.99 msaitoh return (error);
6136 1.99 msaitoh } /* ixgbe_sysctl_eee_state */
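/*
 * Usage sketch (node name assumed for illustration):
 *
 *	sysctl -w hw.ixg0.eee_state=1	# enable EEE, if IXGBE_FEATURE_EEE
 *					# is present in feat_cap
 *	sysctl -w hw.ixg0.eee_state=0	# disable EEE
 *
 * A successful write calls ixgbe_setup_eee() and then if_init() to
 * restart auto-negotiation; reading the node reports whether
 * IXGBE_FEATURE_EEE is currently enabled.
 */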
6137 1.99 msaitoh
6138 1.333 msaitoh #define PRINTQS(sc, regname) \
6139 1.158 msaitoh do { \
6140 1.333 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \
6141 1.158 msaitoh int _i; \
6142 1.158 msaitoh \
6143 1.333 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \
6144 1.333 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \
6145 1.158 msaitoh printf((_i == 0) ? "\t" : " "); \
6146 1.158 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \
6147 1.158 msaitoh IXGBE_##regname(_i))); \
6148 1.158 msaitoh } \
6149 1.158 msaitoh printf("\n"); \
6150 1.158 msaitoh } while (0)
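/*
 * For illustration, PRINTQS(sc, RDT) prints the RDT register of every
 * queue on a single line, e.g. (register values invented):
 *
 *	ixg0: RDT	000003ff 000001f0 ...
 */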
6151 1.158 msaitoh
6152 1.158 msaitoh /************************************************************************
6153 1.158 msaitoh * ixgbe_print_debug_info
6154 1.158 msaitoh *
6155 1.158 msaitoh  *   Called only when the "debug" sysctl (ixgbe_sysctl_debug) is set to 1.
6156 1.158 msaitoh * Provides a way to take a look at important statistics
6157 1.158 msaitoh * maintained by the driver and hardware.
6158 1.158 msaitoh ************************************************************************/
6159 1.158 msaitoh static void
6160 1.333 msaitoh ixgbe_print_debug_info(struct ixgbe_softc *sc)
6161 1.158 msaitoh {
6162 1.333 msaitoh device_t dev = sc->dev;
6163 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6164 1.158 msaitoh int table_size;
6165 1.158 msaitoh int i;
6166 1.158 msaitoh
6167 1.333 msaitoh switch (sc->hw.mac.type) {
6168 1.158 msaitoh case ixgbe_mac_X550:
6169 1.158 msaitoh case ixgbe_mac_X550EM_x:
6170 1.158 msaitoh case ixgbe_mac_X550EM_a:
6171 1.158 msaitoh table_size = 128;
6172 1.158 msaitoh break;
6173 1.158 msaitoh default:
6174 1.158 msaitoh table_size = 32;
6175 1.158 msaitoh break;
6176 1.158 msaitoh }
6177 1.185 msaitoh
6178 1.158 msaitoh device_printf(dev, "[E]RETA:\n");
6179 1.158 msaitoh for (i = 0; i < table_size; i++) {
6180 1.158 msaitoh if (i < 32)
6181 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6182 1.158 msaitoh IXGBE_RETA(i)));
6183 1.158 msaitoh else
6184 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6185 1.158 msaitoh IXGBE_ERETA(i - 32)));
6186 1.158 msaitoh }
6187 1.158 msaitoh
6188 1.158 msaitoh device_printf(dev, "queue:");
6189 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6190 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6191 1.158 msaitoh printf("%8d", i);
6192 1.158 msaitoh }
6193 1.158 msaitoh printf("\n");
6194 1.333 msaitoh PRINTQS(sc, RDBAL);
6195 1.333 msaitoh PRINTQS(sc, RDBAH);
6196 1.333 msaitoh PRINTQS(sc, RDLEN);
6197 1.333 msaitoh PRINTQS(sc, SRRCTL);
6198 1.333 msaitoh PRINTQS(sc, RDH);
6199 1.333 msaitoh PRINTQS(sc, RDT);
6200 1.333 msaitoh PRINTQS(sc, RXDCTL);
6201 1.158 msaitoh
6202 1.158 msaitoh device_printf(dev, "RQSMR:");
6203 1.333 msaitoh for (i = 0; i < sc->num_queues / 4; i++) {
6204 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6205 1.158 msaitoh printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6206 1.158 msaitoh }
6207 1.158 msaitoh printf("\n");
6208 1.158 msaitoh
6209 1.158 msaitoh device_printf(dev, "disabled_count:");
6210 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6211 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6212 1.333 msaitoh printf("%8d", sc->queues[i].disabled_count);
6213 1.158 msaitoh }
6214 1.158 msaitoh printf("\n");
6215 1.185 msaitoh
6216 1.158 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6217 1.158 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
6218 1.158 msaitoh device_printf(dev, "EIMS_EX(0):\t%08x\n",
6219 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6220 1.158 msaitoh device_printf(dev, "EIMS_EX(1):\t%08x\n",
6221 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6222 1.158 msaitoh }
6223 1.265 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6224 1.265 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6225 1.158 msaitoh } /* ixgbe_print_debug_info */
6226 1.158 msaitoh
6227 1.158 msaitoh /************************************************************************
6228 1.158 msaitoh * ixgbe_sysctl_debug
6229 1.158 msaitoh ************************************************************************/
6230 1.158 msaitoh static int
6231 1.158 msaitoh ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6232 1.158 msaitoh {
6233 1.158 msaitoh struct sysctlnode node = *rnode;
6234 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6235 1.186 msaitoh int error, result = 0;
6236 1.158 msaitoh
6237 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6238 1.169 msaitoh return (EPERM);
6239 1.169 msaitoh
6240 1.158 msaitoh node.sysctl_data = &result;
6241 1.158 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6242 1.158 msaitoh
6243 1.158 msaitoh if (error || newp == NULL)
6244 1.158 msaitoh return error;
6245 1.158 msaitoh
6246 1.158 msaitoh if (result == 1)
6247 1.333 msaitoh ixgbe_print_debug_info(sc);
6248 1.158 msaitoh
6249 1.158 msaitoh return 0;
6250 1.158 msaitoh } /* ixgbe_sysctl_debug */
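/*
 * Usage sketch (node name assumed): writing 1 dumps the RETA, per-queue
 * register and interrupt-mask state printed by ixgbe_print_debug_info()
 * to the console, e.g.
 *
 *	sysctl -w hw.ixg0.debug=1
 */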
6251 1.158 msaitoh
6252 1.99 msaitoh /************************************************************************
6253 1.286 msaitoh * ixgbe_sysctl_rx_copy_len
6254 1.286 msaitoh ************************************************************************/
6255 1.286 msaitoh static int
6256 1.286 msaitoh ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6257 1.286 msaitoh {
6258 1.286 msaitoh struct sysctlnode node = *rnode;
6259 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6260 1.286 msaitoh int error;
6261 1.333 msaitoh int result = sc->rx_copy_len;
6262 1.286 msaitoh
6263 1.286 msaitoh node.sysctl_data = &result;
6264 1.286 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6265 1.286 msaitoh
6266 1.286 msaitoh if (error || newp == NULL)
6267 1.286 msaitoh return error;
6268 1.286 msaitoh
6269 1.286 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6270 1.286 msaitoh return EINVAL;
6271 1.286 msaitoh
6272 1.333 msaitoh sc->rx_copy_len = result;
6273 1.286 msaitoh
6274 1.286 msaitoh return 0;
6275 1.286 msaitoh } /* ixgbe_sysctl_rx_copy_len */
6276 1.286 msaitoh
6277 1.286 msaitoh /************************************************************************
6278 1.313 msaitoh * ixgbe_sysctl_tx_process_limit
6279 1.313 msaitoh ************************************************************************/
6280 1.313 msaitoh static int
6281 1.313 msaitoh ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6282 1.313 msaitoh {
6283 1.313 msaitoh struct sysctlnode node = *rnode;
6284 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6285 1.313 msaitoh int error;
6286 1.333 msaitoh int result = sc->tx_process_limit;
6287 1.313 msaitoh
6288 1.313 msaitoh node.sysctl_data = &result;
6289 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6290 1.313 msaitoh
6291 1.313 msaitoh if (error || newp == NULL)
6292 1.313 msaitoh return error;
6293 1.313 msaitoh
6294 1.333 msaitoh if ((result <= 0) || (result > sc->num_tx_desc))
6295 1.313 msaitoh return EINVAL;
6296 1.313 msaitoh
6297 1.333 msaitoh sc->tx_process_limit = result;
6298 1.313 msaitoh
6299 1.313 msaitoh return 0;
6300 1.313 msaitoh } /* ixgbe_sysctl_tx_process_limit */
6301 1.313 msaitoh
6302 1.313 msaitoh /************************************************************************
6303 1.313 msaitoh * ixgbe_sysctl_rx_process_limit
6304 1.313 msaitoh ************************************************************************/
6305 1.313 msaitoh static int
6306 1.313 msaitoh ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6307 1.313 msaitoh {
6308 1.313 msaitoh struct sysctlnode node = *rnode;
6309 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6310 1.313 msaitoh int error;
6311 1.333 msaitoh int result = sc->rx_process_limit;
6312 1.313 msaitoh
6313 1.313 msaitoh node.sysctl_data = &result;
6314 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6315 1.313 msaitoh
6316 1.313 msaitoh if (error || newp == NULL)
6317 1.313 msaitoh return error;
6318 1.313 msaitoh
6319 1.333 msaitoh if ((result <= 0) || (result > sc->num_rx_desc))
6320 1.313 msaitoh return EINVAL;
6321 1.313 msaitoh
6322 1.333 msaitoh sc->rx_process_limit = result;
6323 1.313 msaitoh
6324 1.313 msaitoh return 0;
6325 1.313 msaitoh } /* ixgbe_sysctl_rx_process_limit */
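/*
 * Note on the two handlers above: tx_process_limit and rx_process_limit
 * are clamped to 1..num_tx_desc and 1..num_rx_desc respectively; they are
 * presumably consumed by the Tx/Rx cleanup paths as an upper bound on how
 * many descriptors are handled per invocation (the consumers live outside
 * this file).
 */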
6326 1.313 msaitoh
6327 1.313 msaitoh /************************************************************************
6328 1.99 msaitoh * ixgbe_init_device_features
6329 1.99 msaitoh ************************************************************************/
6330 1.99 msaitoh static void
6331 1.333 msaitoh ixgbe_init_device_features(struct ixgbe_softc *sc)
6332 1.99 msaitoh {
6333 1.333 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP
6334 1.186 msaitoh | IXGBE_FEATURE_RSS
6335 1.186 msaitoh | IXGBE_FEATURE_MSI
6336 1.186 msaitoh | IXGBE_FEATURE_MSIX
6337 1.186 msaitoh | IXGBE_FEATURE_LEGACY_IRQ
6338 1.186 msaitoh | IXGBE_FEATURE_LEGACY_TX;
6339 1.99 msaitoh
6340 1.99 msaitoh /* Set capabilities first... */
6341 1.333 msaitoh switch (sc->hw.mac.type) {
6342 1.99 msaitoh case ixgbe_mac_82598EB:
6343 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
6344 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6345 1.99 msaitoh break;
6346 1.99 msaitoh case ixgbe_mac_X540:
6347 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6348 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6349 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6350 1.333 msaitoh (sc->hw.bus.func == 0))
6351 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6352 1.99 msaitoh break;
6353 1.99 msaitoh case ixgbe_mac_X550:
6354 1.169 msaitoh /*
6355 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6356 1.169 msaitoh * NVM Image version.
6357 1.169 msaitoh */
6358 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6359 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6360 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6361 1.99 msaitoh break;
6362 1.99 msaitoh case ixgbe_mac_X550EM_x:
6363 1.169 msaitoh /*
6364 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6365 1.169 msaitoh * NVM Image version.
6366 1.169 msaitoh */
6367 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6368 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6369 1.99 msaitoh break;
6370 1.99 msaitoh case ixgbe_mac_X550EM_a:
6371 1.169 msaitoh /*
6372 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6373 1.169 msaitoh * NVM Image version.
6374 1.169 msaitoh */
6375 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6376 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6377 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6378 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6379 1.333 msaitoh (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6380 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6381 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_EEE;
6382 1.99 msaitoh }
6383 1.99 msaitoh break;
6384 1.99 msaitoh case ixgbe_mac_82599EB:
6385 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6386 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6387 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6388 1.333 msaitoh (sc->hw.bus.func == 0))
6389 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6390 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6391 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6392 1.99 msaitoh break;
6393 1.99 msaitoh default:
6394 1.99 msaitoh break;
6395 1.99 msaitoh }
6396 1.99 msaitoh
6397 1.99 msaitoh /* Enabled by default... */
6398 1.99 msaitoh /* Fan failure detection */
6399 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6400 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6401 1.99 msaitoh /* Netmap */
6402 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
6403 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP;
6404 1.99 msaitoh /* EEE */
6405 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
6406 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6407 1.99 msaitoh /* Thermal Sensor */
6408 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6409 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6410 1.169 msaitoh /*
6411 1.169 msaitoh * Recovery mode:
6412 1.169 msaitoh * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6413 1.169 msaitoh * NVM Image version.
6414 1.169 msaitoh */
6415 1.99 msaitoh
6416 1.99 msaitoh /* Enabled via global sysctl... */
6417 1.99 msaitoh /* Flow Director */
6418 1.99 msaitoh if (ixgbe_enable_fdir) {
6419 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FDIR)
6420 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FDIR;
6421 1.99 msaitoh else
6422 1.333 msaitoh 			device_printf(sc->dev, "Device does not support "
6423 1.320 msaitoh 			    "Flow Director. Leaving disabled.\n");
6424 1.99 msaitoh }
6425 1.99 msaitoh /* Legacy (single queue) transmit */
6426 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6427 1.99 msaitoh ixgbe_enable_legacy_tx)
6428 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6429 1.99 msaitoh /*
6430 1.99 msaitoh 	 * Message Signaled Interrupts - Extended (MSI-X)
6431 1.99 msaitoh * Normal MSI is only enabled if MSI-X calls fail.
6432 1.99 msaitoh */
6433 1.99 msaitoh if (!ixgbe_enable_msix)
6434 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
6435 1.99 msaitoh /* Receive-Side Scaling (RSS) */
6436 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6437 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS;
6438 1.99 msaitoh
6439 1.99 msaitoh /* Disable features with unmet dependencies... */
6440 1.99 msaitoh /* No MSI-X */
6441 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
6442 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
6443 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6444 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
6445 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
6446 1.99 msaitoh }
6447 1.99 msaitoh } /* ixgbe_init_device_features */
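/*
 * Summary of ixgbe_init_device_features(): feat_cap is seeded per MAC
 * type, a subset is enabled by default (fan failure, netmap, EEE, thermal
 * sensor), the global tunables (ixgbe_enable_fdir, ixgbe_enable_legacy_tx,
 * ixgbe_enable_msix, ixgbe_enable_rss) gate the optional features, and
 * features whose dependencies are missing (RSS/SR-IOV without MSI-X) are
 * finally stripped from both feat_cap and feat_en.
 */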
6448 1.44 msaitoh
6449 1.99 msaitoh /************************************************************************
6450 1.99 msaitoh * ixgbe_probe - Device identification routine
6451 1.98 msaitoh *
6452 1.99 msaitoh  *   Determines if the driver should be loaded on
6453 1.99 msaitoh  *   the adapter based on its PCI vendor/device ID.
6454 1.98 msaitoh  *
6455 1.99 msaitoh  *   return 1 on a match, 0 otherwise (NetBSD autoconf match semantics)
6456 1.99 msaitoh ************************************************************************/
6457 1.98 msaitoh static int
6458 1.98 msaitoh ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6459 1.98 msaitoh {
6460 1.98 msaitoh const struct pci_attach_args *pa = aux;
6461 1.98 msaitoh
6462 1.98 msaitoh return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6463 1.98 msaitoh }
6464 1.98 msaitoh
6465 1.159 maxv static const ixgbe_vendor_info_t *
6466 1.98 msaitoh ixgbe_lookup(const struct pci_attach_args *pa)
6467 1.98 msaitoh {
6468 1.159 maxv const ixgbe_vendor_info_t *ent;
6469 1.98 msaitoh pcireg_t subid;
6470 1.98 msaitoh
6471 1.98 msaitoh INIT_DEBUGOUT("ixgbe_lookup: begin");
6472 1.98 msaitoh
6473 1.98 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6474 1.98 msaitoh return NULL;
6475 1.98 msaitoh
6476 1.98 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6477 1.98 msaitoh
6478 1.98 msaitoh for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6479 1.99 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6480 1.99 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6481 1.99 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6482 1.99 msaitoh (ent->subvendor_id == 0)) &&
6483 1.99 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6484 1.99 msaitoh (ent->subdevice_id == 0))) {
6485 1.98 msaitoh return ent;
6486 1.98 msaitoh }
6487 1.98 msaitoh }
6488 1.98 msaitoh return NULL;
6489 1.98 msaitoh }
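/*
 * Note: entries in ixgbe_vendor_info_array with a subvendor_id or
 * subdevice_id of 0 act as wildcards, so a generic entry matches any
 * subsystem ID of that device.
 */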
6490 1.98 msaitoh
6491 1.98 msaitoh static int
6492 1.98 msaitoh ixgbe_ifflags_cb(struct ethercom *ec)
6493 1.98 msaitoh {
6494 1.98 msaitoh struct ifnet *ifp = &ec->ec_if;
6495 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6496 1.210 msaitoh u_short change;
6497 1.210 msaitoh int rv = 0;
6498 1.98 msaitoh
6499 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6500 1.98 msaitoh
6501 1.333 msaitoh change = ifp->if_flags ^ sc->if_flags;
6502 1.98 msaitoh if (change != 0)
6503 1.333 msaitoh sc->if_flags = ifp->if_flags;
6504 1.98 msaitoh
6505 1.192 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6506 1.192 msaitoh rv = ENETRESET;
6507 1.192 msaitoh goto out;
6508 1.192 msaitoh } else if ((change & IFF_PROMISC) != 0)
6509 1.333 msaitoh ixgbe_set_rxfilter(sc);
6510 1.98 msaitoh
6511 1.193 msaitoh /* Check for ec_capenable. */
6512 1.333 msaitoh change = ec->ec_capenable ^ sc->ec_capenable;
6513 1.333 msaitoh sc->ec_capenable = ec->ec_capenable;
6514 1.193 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6515 1.193 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) {
6516 1.193 msaitoh rv = ENETRESET;
6517 1.193 msaitoh goto out;
6518 1.193 msaitoh }
6519 1.193 msaitoh
6520 1.193 msaitoh /*
6521 1.193 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU.
6522 1.193 msaitoh 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6523 1.193 msaitoh */
6524 1.193 msaitoh
6525 1.98 msaitoh /* Set up VLAN support and filter */
6526 1.193 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6527 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
6528 1.98 msaitoh
6529 1.192 msaitoh out:
6530 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6531 1.98 msaitoh
6532 1.192 msaitoh return rv;
6533 1.98 msaitoh }
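/*
 * Note: returning ENETRESET from this ec_ifflags_cb tells the Ethernet
 * layer that the flag/capability change cannot be applied on the fly and
 * that the interface must be reinitialized.
 */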
6534 1.98 msaitoh
6535 1.99 msaitoh /************************************************************************
6536 1.99 msaitoh * ixgbe_ioctl - Ioctl entry point
6537 1.98 msaitoh *
6538 1.99 msaitoh * Called when the user wants to configure the interface.
6539 1.98 msaitoh *
6540 1.99 msaitoh * return 0 on success, positive on failure
6541 1.99 msaitoh ************************************************************************/
6542 1.98 msaitoh static int
6543 1.232 msaitoh ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6544 1.98 msaitoh {
6545 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6546 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6547 1.98 msaitoh struct ifcapreq *ifcr = data;
6548 1.98 msaitoh struct ifreq *ifr = data;
6549 1.186 msaitoh int error = 0;
6550 1.98 msaitoh int l4csum_en;
6551 1.185 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6552 1.185 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6553 1.98 msaitoh
6554 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6555 1.169 msaitoh return (EPERM);
6556 1.169 msaitoh
6557 1.98 msaitoh switch (command) {
6558 1.98 msaitoh case SIOCSIFFLAGS:
6559 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6560 1.98 msaitoh break;
6561 1.98 msaitoh case SIOCADDMULTI:
6562 1.98 msaitoh case SIOCDELMULTI:
6563 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6564 1.98 msaitoh break;
6565 1.98 msaitoh case SIOCSIFMEDIA:
6566 1.98 msaitoh case SIOCGIFMEDIA:
6567 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6568 1.98 msaitoh break;
6569 1.98 msaitoh case SIOCSIFCAP:
6570 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6571 1.98 msaitoh break;
6572 1.98 msaitoh case SIOCSIFMTU:
6573 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6574 1.98 msaitoh break;
6575 1.98 msaitoh #ifdef __NetBSD__
6576 1.98 msaitoh case SIOCINITIFADDR:
6577 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6578 1.98 msaitoh break;
6579 1.98 msaitoh case SIOCGIFFLAGS:
6580 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6581 1.98 msaitoh break;
6582 1.98 msaitoh case SIOCGIFAFLAG_IN:
6583 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6584 1.98 msaitoh break;
6585 1.98 msaitoh case SIOCGIFADDR:
6586 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6587 1.98 msaitoh break;
6588 1.98 msaitoh case SIOCGIFMTU:
6589 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6590 1.98 msaitoh break;
6591 1.98 msaitoh case SIOCGIFCAP:
6592 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6593 1.98 msaitoh break;
6594 1.98 msaitoh case SIOCGETHERCAP:
6595 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6596 1.98 msaitoh break;
6597 1.98 msaitoh case SIOCGLIFADDR:
6598 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6599 1.98 msaitoh break;
6600 1.98 msaitoh case SIOCZIFDATA:
6601 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6602 1.98 msaitoh hw->mac.ops.clear_hw_cntrs(hw);
6603 1.333 msaitoh ixgbe_clear_evcnt(sc);
6604 1.98 msaitoh break;
6605 1.98 msaitoh case SIOCAIFADDR:
6606 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6607 1.98 msaitoh break;
6608 1.98 msaitoh #endif
6609 1.98 msaitoh default:
6610 1.98 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6611 1.98 msaitoh break;
6612 1.98 msaitoh }
6613 1.24 msaitoh
6614 1.98 msaitoh switch (command) {
6615 1.98 msaitoh case SIOCGI2C:
6616 1.98 msaitoh {
6617 1.98 msaitoh struct ixgbe_i2c_req i2c;
6618 1.24 msaitoh
6619 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6620 1.98 msaitoh error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6621 1.98 msaitoh if (error != 0)
6622 1.98 msaitoh break;
6623 1.98 msaitoh if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6624 1.98 msaitoh error = EINVAL;
6625 1.98 msaitoh break;
6626 1.98 msaitoh }
6627 1.98 msaitoh if (i2c.len > sizeof(i2c.data)) {
6628 1.98 msaitoh error = EINVAL;
6629 1.98 msaitoh break;
6630 1.98 msaitoh }
6631 1.24 msaitoh
6632 1.98 msaitoh hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6633 1.98 msaitoh i2c.dev_addr, i2c.data);
6634 1.98 msaitoh error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6635 1.98 msaitoh break;
6636 1.98 msaitoh }
6637 1.98 msaitoh case SIOCSIFCAP:
6638 1.98 msaitoh /* Layer-4 Rx checksum offload has to be turned on and
6639 1.98 msaitoh * off as a unit.
6640 1.98 msaitoh */
6641 1.98 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum;
6642 1.98 msaitoh if (l4csum_en != l4csum && l4csum_en != 0)
6643 1.98 msaitoh return EINVAL;
6644 1.98 msaitoh /*FALLTHROUGH*/
6645 1.98 msaitoh case SIOCADDMULTI:
6646 1.98 msaitoh case SIOCDELMULTI:
6647 1.98 msaitoh case SIOCSIFFLAGS:
6648 1.98 msaitoh case SIOCSIFMTU:
6649 1.98 msaitoh default:
6650 1.98 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6651 1.98 msaitoh return error;
6652 1.98 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
6653 1.98 msaitoh ;
6654 1.98 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6655 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6656 1.135 msaitoh if ((ifp->if_flags & IFF_RUNNING) != 0)
6657 1.333 msaitoh ixgbe_init_locked(sc);
6658 1.333 msaitoh ixgbe_recalculate_max_frame(sc);
6659 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6660 1.98 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6661 1.98 msaitoh /*
6662 1.98 msaitoh * Multicast list has changed; set the hardware filter
6663 1.98 msaitoh * accordingly.
6664 1.98 msaitoh */
6665 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6666 1.333 msaitoh ixgbe_disable_intr(sc);
6667 1.333 msaitoh ixgbe_set_rxfilter(sc);
6668 1.333 msaitoh ixgbe_enable_intr(sc);
6669 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6670 1.98 msaitoh }
6671 1.98 msaitoh return 0;
6672 1.24 msaitoh }
6673 1.24 msaitoh
6674 1.98 msaitoh return error;
6675 1.99 msaitoh } /* ixgbe_ioctl */
6676 1.99 msaitoh
6677 1.99 msaitoh /************************************************************************
6678 1.99 msaitoh * ixgbe_check_fan_failure
6679 1.99 msaitoh ************************************************************************/
6680 1.274 msaitoh static int
6681 1.333 msaitoh ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
6682 1.99 msaitoh {
6683 1.99 msaitoh u32 mask;
6684 1.99 msaitoh
6685 1.333 msaitoh mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
6686 1.99 msaitoh IXGBE_ESDP_SDP1;
6687 1.26 msaitoh
6688 1.312 msaitoh if ((reg & mask) == 0)
6689 1.312 msaitoh return IXGBE_SUCCESS;
6690 1.312 msaitoh
6691 1.312 msaitoh /*
6692 1.312 msaitoh 	 * Use ratecheck() just in case interrupts occur frequently.
6693 1.312 msaitoh 	 * When the EXPX9501AT's fan stopped, the interrupt occurred only
6694 1.312 msaitoh 	 * once, a red LED on the board turned on and the link never came
6695 1.312 msaitoh 	 * up until power off.
6696 1.312 msaitoh */
6697 1.333 msaitoh if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl))
6698 1.333 msaitoh device_printf(sc->dev,
6699 1.280 msaitoh "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6700 1.274 msaitoh
6701 1.312 msaitoh return IXGBE_ERR_FAN_FAILURE;
6702 1.99 msaitoh } /* ixgbe_check_fan_failure */
6703 1.99 msaitoh
6704 1.99 msaitoh /************************************************************************
6705 1.99 msaitoh * ixgbe_handle_que
6706 1.99 msaitoh ************************************************************************/
6707 1.98 msaitoh static void
6708 1.98 msaitoh ixgbe_handle_que(void *context)
6709 1.44 msaitoh {
6710 1.98 msaitoh struct ix_queue *que = context;
6711 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
6712 1.186 msaitoh struct tx_ring *txr = que->txr;
6713 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6714 1.121 msaitoh bool more = false;
6715 1.44 msaitoh
6716 1.305 msaitoh IXGBE_EVC_ADD(&que->handleq, 1);
6717 1.44 msaitoh
6718 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) {
6719 1.98 msaitoh IXGBE_TX_LOCK(txr);
6720 1.323 msaitoh more = ixgbe_txeof(txr);
6721 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
6722 1.99 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6723 1.99 msaitoh ixgbe_mq_start_locked(ifp, txr);
6724 1.98 msaitoh /* Only for queue 0 */
6725 1.99 msaitoh /* NetBSD still needs this for CBQ */
6726 1.333 msaitoh if ((&sc->queues[0] == que)
6727 1.99 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6728 1.99 msaitoh ixgbe_legacy_start_locked(ifp, txr);
6729 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
6730 1.323 msaitoh more |= ixgbe_rxeof(que);
6731 1.44 msaitoh }
6732 1.44 msaitoh
6733 1.128 knakahar if (more) {
6734 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
6735 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
6736 1.128 knakahar } else if (que->res != NULL) {
6737 1.265 msaitoh /* MSIX: Re-enable this interrupt */
6738 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
6739 1.265 msaitoh } else {
6740 1.265 msaitoh /* INTx or MSI */
6741 1.333 msaitoh ixgbe_enable_queue(sc, 0);
6742 1.265 msaitoh }
6743 1.99 msaitoh
6744 1.98 msaitoh return;
6745 1.99 msaitoh } /* ixgbe_handle_que */
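/*
 * Note on the tail of ixgbe_handle_que(): if more work remains, the queue
 * is rescheduled via ixgbe_sched_handle_que() (softint or workqueue);
 * otherwise the queue interrupt is unmasked again, using que->msix for
 * MSI-X (que->res != NULL) and vector 0 for INTx/MSI.
 */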
6746 1.44 msaitoh
6747 1.99 msaitoh /************************************************************************
6748 1.128 knakahar * ixgbe_handle_que_work
6749 1.128 knakahar ************************************************************************/
6750 1.128 knakahar static void
6751 1.128 knakahar ixgbe_handle_que_work(struct work *wk, void *context)
6752 1.128 knakahar {
6753 1.128 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6754 1.128 knakahar
6755 1.128 knakahar /*
6756 1.128 knakahar * "enqueued flag" is not required here.
6757 1.128 knakahar * See ixgbe_msix_que().
6758 1.128 knakahar */
6759 1.128 knakahar ixgbe_handle_que(que);
6760 1.128 knakahar }
6761 1.128 knakahar
6762 1.128 knakahar /************************************************************************
6763 1.99 msaitoh * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6764 1.99 msaitoh ************************************************************************/
6765 1.48 msaitoh static int
6766 1.333 msaitoh ixgbe_allocate_legacy(struct ixgbe_softc *sc,
6767 1.98 msaitoh const struct pci_attach_args *pa)
6768 1.48 msaitoh {
6769 1.333 msaitoh device_t dev = sc->dev;
6770 1.333 msaitoh struct ix_queue *que = sc->queues;
6771 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
6772 1.98 msaitoh int counts[PCI_INTR_TYPE_SIZE];
6773 1.98 msaitoh pci_intr_type_t intr_type, max_type;
6774 1.186 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6775 1.206 knakahar char wqname[MAXCOMLEN];
6776 1.98 msaitoh const char *intrstr = NULL;
6777 1.206 knakahar int defertx_error = 0, error;
6778 1.185 msaitoh
6779 1.99 msaitoh /* We allocate a single interrupt resource */
6780 1.98 msaitoh max_type = PCI_INTR_TYPE_MSI;
6781 1.98 msaitoh counts[PCI_INTR_TYPE_MSIX] = 0;
6782 1.99 msaitoh counts[PCI_INTR_TYPE_MSI] =
6783 1.333 msaitoh (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6784 1.118 msaitoh /* Check not feat_en but feat_cap to fallback to INTx */
6785 1.99 msaitoh counts[PCI_INTR_TYPE_INTX] =
6786 1.333 msaitoh (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6787 1.48 msaitoh
6788 1.98 msaitoh alloc_retry:
6789 1.333 msaitoh if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) {
6790 1.98 msaitoh aprint_error_dev(dev, "couldn't alloc interrupt\n");
6791 1.98 msaitoh return ENXIO;
6792 1.98 msaitoh }
6793 1.333 msaitoh sc->osdep.nintrs = 1;
6794 1.333 msaitoh intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0],
6795 1.98 msaitoh intrbuf, sizeof(intrbuf));
6796 1.333 msaitoh sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc,
6797 1.333 msaitoh sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6798 1.98 msaitoh device_xname(dev));
6799 1.333 msaitoh intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]);
6800 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6801 1.98 msaitoh 		aprint_error_dev(dev, "unable to establish %s\n",
6802 1.98 msaitoh (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6803 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6804 1.333 msaitoh sc->osdep.intrs = NULL;
6805 1.98 msaitoh switch (intr_type) {
6806 1.98 msaitoh case PCI_INTR_TYPE_MSI:
6807 1.98 msaitoh /* The next try is for INTx: Disable MSI */
6808 1.98 msaitoh max_type = PCI_INTR_TYPE_INTX;
6809 1.98 msaitoh counts[PCI_INTR_TYPE_INTX] = 1;
6810 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6811 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6812 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6813 1.118 msaitoh goto alloc_retry;
6814 1.118 msaitoh } else
6815 1.118 msaitoh break;
6816 1.98 msaitoh case PCI_INTR_TYPE_INTX:
6817 1.98 msaitoh default:
6818 1.98 msaitoh /* See below */
6819 1.98 msaitoh break;
6820 1.98 msaitoh }
6821 1.98 msaitoh }
6822 1.119 msaitoh if (intr_type == PCI_INTR_TYPE_INTX) {
6823 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6824 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6825 1.119 msaitoh }
6826 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6827 1.98 msaitoh aprint_error_dev(dev,
6828 1.98 msaitoh "couldn't establish interrupt%s%s\n",
6829 1.98 msaitoh intrstr ? " at " : "", intrstr ? intrstr : "");
6830 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6831 1.333 msaitoh sc->osdep.intrs = NULL;
6832 1.98 msaitoh return ENXIO;
6833 1.98 msaitoh }
6834 1.98 msaitoh aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6835 1.98 msaitoh /*
6836 1.98 msaitoh * Try allocating a fast interrupt and the associated deferred
6837 1.98 msaitoh * processing contexts.
6838 1.98 msaitoh */
6839 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6840 1.99 msaitoh txr->txr_si =
6841 1.229 msaitoh softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6842 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6843 1.206 knakahar
6844 1.280 msaitoh snprintf(wqname, sizeof(wqname), "%sdeferTx",
6845 1.280 msaitoh device_xname(dev));
6846 1.333 msaitoh defertx_error = workqueue_create(&sc->txr_wq, wqname,
6847 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI,
6848 1.206 knakahar IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6849 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6850 1.206 knakahar }
6851 1.229 msaitoh que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6852 1.98 msaitoh ixgbe_handle_que, que);
6853 1.206 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6854 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
6855 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
6856 1.206 knakahar IXGBE_WORKQUEUE_FLAGS);
6857 1.48 msaitoh
6858 1.333 msaitoh if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
6859 1.206 knakahar && ((txr->txr_si == NULL) || defertx_error != 0))
6860 1.206 knakahar || (que->que_si == NULL) || error != 0) {
6861 1.98 msaitoh aprint_error_dev(dev,
6862 1.185 msaitoh "could not establish software interrupts\n");
6863 1.99 msaitoh
6864 1.98 msaitoh return ENXIO;
6865 1.98 msaitoh }
6866 1.98 msaitoh /* For simplicity in the handlers */
6867 1.333 msaitoh sc->active_queues = IXGBE_EIMS_ENABLE_MASK;
6868 1.44 msaitoh
6869 1.44 msaitoh return (0);
6870 1.99 msaitoh } /* ixgbe_allocate_legacy */
6871 1.44 msaitoh
6872 1.99 msaitoh /************************************************************************
6873 1.99 msaitoh * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6874 1.99 msaitoh ************************************************************************/
6875 1.44 msaitoh static int
6876 1.333 msaitoh ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
6877 1.44 msaitoh {
6878 1.333 msaitoh device_t dev = sc->dev;
6879 1.341 msaitoh struct ix_queue *que = sc->queues;
6880 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
6881 1.98 msaitoh pci_chipset_tag_t pc;
6882 1.98 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6883 1.98 msaitoh char intr_xname[32];
6884 1.128 knakahar char wqname[MAXCOMLEN];
6885 1.98 msaitoh const char *intrstr = NULL;
6886 1.186 msaitoh int error, vector = 0;
6887 1.98 msaitoh int cpu_id = 0;
6888 1.98 msaitoh kcpuset_t *affinity;
6889 1.99 msaitoh #ifdef RSS
6890 1.186 msaitoh unsigned int rss_buckets = 0;
6891 1.99 msaitoh kcpuset_t cpu_mask;
6892 1.98 msaitoh #endif
6893 1.98 msaitoh
6894 1.333 msaitoh pc = sc->osdep.pc;
6895 1.98 msaitoh #ifdef RSS
6896 1.98 msaitoh /*
6897 1.98 msaitoh * If we're doing RSS, the number of queues needs to
6898 1.98 msaitoh * match the number of RSS buckets that are configured.
6899 1.98 msaitoh *
6900 1.98 msaitoh * + If there's more queues than RSS buckets, we'll end
6901 1.98 msaitoh * up with queues that get no traffic.
6902 1.98 msaitoh *
6903 1.98 msaitoh * + If there's more RSS buckets than queues, we'll end
6904 1.98 msaitoh * up having multiple RSS buckets map to the same queue,
6905 1.98 msaitoh * so there'll be some contention.
6906 1.98 msaitoh */
6907 1.99 msaitoh rss_buckets = rss_getnumbuckets();
6908 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
6909 1.333 msaitoh (sc->num_queues != rss_buckets)) {
6910 1.98 msaitoh device_printf(dev,
6911 1.98 msaitoh "%s: number of queues (%d) != number of RSS buckets (%d)"
6912 1.98 msaitoh "; performance will be impacted.\n",
6913 1.333 msaitoh __func__, sc->num_queues, rss_buckets);
6914 1.98 msaitoh }
6915 1.98 msaitoh #endif
6916 1.98 msaitoh
6917 1.333 msaitoh sc->osdep.nintrs = sc->num_queues + 1;
6918 1.333 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
6919 1.333 msaitoh sc->osdep.nintrs) != 0) {
6920 1.98 msaitoh aprint_error_dev(dev,
6921 1.98 msaitoh "failed to allocate MSI-X interrupt\n");
6922 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
6923 1.98 msaitoh return (ENXIO);
6924 1.98 msaitoh }
6925 1.98 msaitoh
6926 1.98 msaitoh kcpuset_create(&affinity, false);
6927 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6928 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6929 1.98 msaitoh device_xname(dev), i);
6930 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
6931 1.98 msaitoh sizeof(intrbuf));
6932 1.98 msaitoh #ifdef IXGBE_MPSAFE
6933 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
6934 1.98 msaitoh true);
6935 1.98 msaitoh #endif
6936 1.98 msaitoh /* Set the handler function */
6937 1.333 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6938 1.333 msaitoh sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6939 1.98 msaitoh intr_xname);
6940 1.98 msaitoh if (que->res == NULL) {
6941 1.98 msaitoh aprint_error_dev(dev,
6942 1.98 msaitoh "Failed to register QUE handler\n");
6943 1.119 msaitoh error = ENXIO;
6944 1.119 msaitoh goto err_out;
6945 1.98 msaitoh }
6946 1.98 msaitoh que->msix = vector;
6947 1.333 msaitoh sc->active_queues |= 1ULL << que->msix;
6948 1.99 msaitoh
6949 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
6950 1.98 msaitoh #ifdef RSS
6951 1.99 msaitoh /*
6952 1.99 msaitoh * The queue ID is used as the RSS layer bucket ID.
6953 1.99 msaitoh * We look up the queue ID -> RSS CPU ID and select
6954 1.99 msaitoh * that.
6955 1.99 msaitoh */
6956 1.99 msaitoh cpu_id = rss_getcpu(i % rss_getnumbuckets());
6957 1.99 msaitoh CPU_SETOF(cpu_id, &cpu_mask);
6958 1.98 msaitoh #endif
6959 1.99 msaitoh } else {
6960 1.99 msaitoh /*
6961 1.99 msaitoh * Bind the MSI-X vector, and thus the
6962 1.99 msaitoh * rings to the corresponding CPU.
6963 1.99 msaitoh *
6964 1.99 msaitoh * This just happens to match the default RSS
6965 1.99 msaitoh * round-robin bucket -> queue -> CPU allocation.
6966 1.99 msaitoh */
6967 1.333 msaitoh if (sc->num_queues > 1)
6968 1.99 msaitoh cpu_id = i;
6969 1.99 msaitoh }
6970 1.98 msaitoh /* Round-robin affinity */
6971 1.98 msaitoh kcpuset_zero(affinity);
6972 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
6973 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity,
6974 1.98 msaitoh NULL);
6975 1.98 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6976 1.98 msaitoh intrstr);
6977 1.98 msaitoh if (error == 0) {
6978 1.98 msaitoh #if 1 /* def IXGBE_DEBUG */
6979 1.98 msaitoh #ifdef RSS
6980 1.322 skrll aprint_normal(", bound RSS bucket %d to CPU %d", i,
6981 1.99 msaitoh cpu_id % ncpu);
6982 1.98 msaitoh #else
6983 1.99 msaitoh aprint_normal(", bound queue %d to cpu %d", i,
6984 1.99 msaitoh cpu_id % ncpu);
6985 1.98 msaitoh #endif
6986 1.98 msaitoh #endif /* IXGBE_DEBUG */
6987 1.98 msaitoh }
6988 1.98 msaitoh aprint_normal("\n");
6989 1.99 msaitoh
6990 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6991 1.99 msaitoh txr->txr_si = softint_establish(
6992 1.229 msaitoh SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6993 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6994 1.119 msaitoh if (txr->txr_si == NULL) {
6995 1.119 msaitoh aprint_error_dev(dev,
6996 1.119 msaitoh "couldn't establish software interrupt\n");
6997 1.119 msaitoh error = ENXIO;
6998 1.119 msaitoh goto err_out;
6999 1.119 msaitoh }
7000 1.119 msaitoh }
7001 1.98 msaitoh que->que_si
7002 1.229 msaitoh = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
7003 1.98 msaitoh ixgbe_handle_que, que);
7004 1.98 msaitoh if (que->que_si == NULL) {
7005 1.98 msaitoh aprint_error_dev(dev,
7006 1.185 msaitoh "couldn't establish software interrupt\n");
7007 1.119 msaitoh error = ENXIO;
7008 1.119 msaitoh goto err_out;
7009 1.98 msaitoh }
7010 1.98 msaitoh }
7011 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
7012 1.333 msaitoh error = workqueue_create(&sc->txr_wq, wqname,
7013 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7014 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7015 1.128 knakahar if (error) {
7016 1.280 msaitoh aprint_error_dev(dev,
7017 1.280 msaitoh "couldn't create workqueue for deferred Tx\n");
7018 1.128 knakahar goto err_out;
7019 1.128 knakahar }
7020 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
7021 1.128 knakahar
7022 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
7023 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
7024 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7025 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7026 1.128 knakahar if (error) {
7027 1.128 knakahar aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
7028 1.128 knakahar goto err_out;
7029 1.128 knakahar }
7030 1.44 msaitoh
7031 1.98 msaitoh /* and Link */
7032 1.98 msaitoh cpu_id++;
7033 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
7034 1.333 msaitoh sc->vector = vector;
7035 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
7036 1.98 msaitoh sizeof(intrbuf));
7037 1.98 msaitoh #ifdef IXGBE_MPSAFE
7038 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
7039 1.98 msaitoh true);
7040 1.98 msaitoh #endif
7041 1.98 msaitoh /* Set the link handler function */
7042 1.333 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
7043 1.333 msaitoh sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc,
7044 1.98 msaitoh intr_xname);
7045 1.333 msaitoh if (sc->osdep.ihs[vector] == NULL) {
7046 1.98 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n");
7047 1.119 msaitoh error = ENXIO;
7048 1.119 msaitoh goto err_out;
7049 1.98 msaitoh }
7050 1.98 msaitoh /* Round-robin affinity */
7051 1.98 msaitoh kcpuset_zero(affinity);
7052 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
7053 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
7054 1.119 msaitoh NULL);
7055 1.44 msaitoh
7056 1.98 msaitoh aprint_normal_dev(dev,
7057 1.98 msaitoh "for link, interrupting at %s", intrstr);
7058 1.98 msaitoh if (error == 0)
7059 1.98 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
7060 1.44 msaitoh else
7061 1.98 msaitoh aprint_normal("\n");
7062 1.44 msaitoh
7063 1.98 msaitoh kcpuset_destroy(affinity);
7064 1.119 msaitoh aprint_normal_dev(dev,
7065 1.119 msaitoh "Using MSI-X interrupts with %d vectors\n", vector + 1);
7066 1.99 msaitoh
7067 1.44 msaitoh return (0);
7068 1.119 msaitoh
7069 1.119 msaitoh err_out:
7070 1.119 msaitoh kcpuset_destroy(affinity);
7071 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
7072 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
7073 1.119 msaitoh return (error);
7074 1.99 msaitoh } /* ixgbe_allocate_msix */
7075 1.44 msaitoh
7076 1.99 msaitoh /************************************************************************
7077 1.99 msaitoh * ixgbe_configure_interrupts
7078 1.99 msaitoh *
7079 1.99 msaitoh * Setup MSI-X, MSI, or legacy interrupts (in that order).
7080 1.99 msaitoh * This will also depend on user settings.
7081 1.99 msaitoh ************************************************************************/
7082 1.44 msaitoh static int
7083 1.333 msaitoh ixgbe_configure_interrupts(struct ixgbe_softc *sc)
7084 1.44 msaitoh {
7085 1.333 msaitoh device_t dev = sc->dev;
7086 1.333 msaitoh struct ixgbe_mac_info *mac = &sc->hw.mac;
7087 1.98 msaitoh int want, queues, msgs;
7088 1.44 msaitoh
7089 1.99 msaitoh /* Default to 1 queue if MSI-X setup fails */
7090 1.333 msaitoh sc->num_queues = 1;
7091 1.99 msaitoh
7092 1.98 msaitoh /* Override by tuneable */
7093 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX))
7094 1.98 msaitoh goto msi;
7095 1.44 msaitoh
7096 1.118 msaitoh /*
7097 1.118 msaitoh * NetBSD only: Use single vector MSI when number of CPU is 1 to save
7098 1.118 msaitoh * interrupt slot.
7099 1.118 msaitoh */
7100 1.118 msaitoh if (ncpu == 1)
7101 1.118 msaitoh goto msi;
7102 1.185 msaitoh
7103 1.99 msaitoh /* First try MSI-X */
7104 1.333 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
7105 1.98 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR);
7106 1.98 msaitoh if (msgs < 2)
7107 1.98 msaitoh goto msi;
7108 1.44 msaitoh
7109 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7110 1.44 msaitoh
7111 1.98 msaitoh /* Figure out a reasonable auto config value */
7112 1.98 msaitoh queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
7113 1.44 msaitoh
7114 1.98 msaitoh #ifdef RSS
7115 1.98 msaitoh /* If we're doing RSS, clamp at the number of RSS buckets */
7116 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
7117 1.165 riastrad queues = uimin(queues, rss_getnumbuckets());
7118 1.98 msaitoh #endif
7119 1.99 msaitoh if (ixgbe_num_queues > queues) {
7120 1.333 msaitoh aprint_error_dev(sc->dev,
7121 1.319 msaitoh "ixgbe_num_queues (%d) is too large, "
7122 1.319 msaitoh "using reduced amount (%d).\n", ixgbe_num_queues, queues);
7123 1.99 msaitoh ixgbe_num_queues = queues;
7124 1.99 msaitoh }
7125 1.44 msaitoh
7126 1.98 msaitoh if (ixgbe_num_queues != 0)
7127 1.98 msaitoh queues = ixgbe_num_queues;
7128 1.98 msaitoh else
7129 1.165 riastrad queues = uimin(queues,
7130 1.165 riastrad uimin(mac->max_tx_queues, mac->max_rx_queues));
7131 1.44 msaitoh
7132 1.98 msaitoh /*
7133 1.99 msaitoh * Want one vector (RX/TX pair) per queue
7134 1.99 msaitoh 	 * plus one additional vector for the Link (admin) interrupt.
7135 1.99 msaitoh */
7136 1.98 msaitoh want = queues + 1;
7137 1.98 msaitoh if (msgs >= want)
7138 1.98 msaitoh msgs = want;
7139 1.44 msaitoh else {
7140 1.186 msaitoh aprint_error_dev(dev, "MSI-X Configuration Problem, "
7141 1.319 msaitoh "%d vectors but %d queues wanted!\n", msgs, want);
7142 1.98 msaitoh goto msi;
7143 1.44 msaitoh }
7144 1.333 msaitoh sc->num_queues = queues;
7145 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7146 1.99 msaitoh return (0);
7147 1.44 msaitoh
7148 1.98 msaitoh /*
7149 1.99 msaitoh * MSI-X allocation failed or provided us with
7150 1.99 msaitoh * less vectors than needed. Free MSI-X resources
7151 1.99 msaitoh * and we'll try enabling MSI.
7152 1.99 msaitoh */
7153 1.98 msaitoh msi:
7154 1.99 msaitoh /* Without MSI-X, some features are no longer supported */
7155 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
7156 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
7157 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7158 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
7159 1.99 msaitoh
7160 1.333 msaitoh msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag);
7161 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
7162 1.99 msaitoh if (msgs > 1)
7163 1.99 msaitoh msgs = 1;
7164 1.99 msaitoh if (msgs != 0) {
7165 1.99 msaitoh msgs = 1;
7166 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
7167 1.99 msaitoh return (0);
7168 1.99 msaitoh }
7169 1.99 msaitoh
7170 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7171 1.99 msaitoh aprint_error_dev(dev,
7172 1.99 msaitoh "Device does not support legacy interrupts.\n");
7173 1.99 msaitoh return 1;
7174 1.99 msaitoh }
7175 1.99 msaitoh
7176 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7177 1.99 msaitoh
7178 1.99 msaitoh return (0);
7179 1.99 msaitoh } /* ixgbe_configure_interrupts */
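/*
 * Worked example of the sizing above (numbers invented): with ncpu == 8,
 * the ixgbe_num_queues tunable left at 0, and 16 MSI-X messages available
 * (after the IXG_MAX_NINTR cap), queues = min(8, 16 - 1) = 8 and
 * want = queues + 1 = 9 vectors -- one Tx/Rx pair per queue plus the
 * link/admin vector -- assuming the MAC supports at least that many
 * Tx/Rx queues.  If fewer than `want' messages are available, the code
 * falls back to single-vector MSI and then to INTx.
 */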
7180 1.44 msaitoh
7181 1.48 msaitoh
7182 1.99 msaitoh /************************************************************************
7183 1.99 msaitoh * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7184 1.99 msaitoh *
7185 1.99 msaitoh * Done outside of interrupt context since the driver might sleep
7186 1.99 msaitoh ************************************************************************/
7187 1.26 msaitoh static void
7188 1.98 msaitoh ixgbe_handle_link(void *context)
7189 1.26 msaitoh {
7190 1.333 msaitoh struct ixgbe_softc *sc = context;
7191 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
7192 1.26 msaitoh
7193 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
7194 1.257 msaitoh
7195 1.333 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1);
7196 1.333 msaitoh ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0);
7197 1.333 msaitoh ixgbe_update_link_status(sc);
7198 1.26 msaitoh
7199 1.98 msaitoh /* Re-enable link interrupts */
7200 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7201 1.99 msaitoh } /* ixgbe_handle_link */
7202 1.45 msaitoh
7203 1.161 kamil #if 0
7204 1.99 msaitoh /************************************************************************
7205 1.99 msaitoh * ixgbe_rearm_queues
7206 1.99 msaitoh ************************************************************************/
7207 1.160 msaitoh static __inline void
7208 1.333 msaitoh ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues)
7209 1.63 msaitoh {
7210 1.63 msaitoh u32 mask;
7211 1.63 msaitoh
7212 1.333 msaitoh switch (sc->hw.mac.type) {
7213 1.63 msaitoh case ixgbe_mac_82598EB:
7214 1.63 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7215 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
7216 1.63 msaitoh break;
7217 1.63 msaitoh case ixgbe_mac_82599EB:
7218 1.63 msaitoh case ixgbe_mac_X540:
7219 1.63 msaitoh case ixgbe_mac_X550:
7220 1.63 msaitoh case ixgbe_mac_X550EM_x:
7221 1.99 msaitoh case ixgbe_mac_X550EM_a:
7222 1.63 msaitoh mask = (queues & 0xFFFFFFFF);
7223 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
7224 1.63 msaitoh mask = (queues >> 32);
7225 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
7226 1.63 msaitoh break;
7227 1.63 msaitoh default:
7228 1.63 msaitoh break;
7229 1.63 msaitoh }
7230 1.99 msaitoh } /* ixgbe_rearm_queues */
7231 1.161 kamil #endif
7232