1 1.346 msaitoh /* $NetBSD: ixgbe.c,v 1.346 2023/11/02 05:07:57 msaitoh Exp $ */
2 1.99 msaitoh
3 1.1 dyoung /******************************************************************************
4 1.1 dyoung
5 1.99 msaitoh Copyright (c) 2001-2017, Intel Corporation
6 1.1 dyoung All rights reserved.
7 1.99 msaitoh
8 1.99 msaitoh Redistribution and use in source and binary forms, with or without
9 1.1 dyoung modification, are permitted provided that the following conditions are met:
10 1.99 msaitoh
11 1.99 msaitoh 1. Redistributions of source code must retain the above copyright notice,
12 1.1 dyoung this list of conditions and the following disclaimer.
13 1.99 msaitoh
14 1.99 msaitoh 2. Redistributions in binary form must reproduce the above copyright
15 1.99 msaitoh notice, this list of conditions and the following disclaimer in the
16 1.1 dyoung documentation and/or other materials provided with the distribution.
17 1.99 msaitoh
18 1.99 msaitoh 3. Neither the name of the Intel Corporation nor the names of its
19 1.99 msaitoh contributors may be used to endorse or promote products derived from
20 1.1 dyoung this software without specific prior written permission.
21 1.99 msaitoh
22 1.1 dyoung THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 1.99 msaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.99 msaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.99 msaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 1.99 msaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.99 msaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.99 msaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.99 msaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.99 msaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 dyoung ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 dyoung POSSIBILITY OF SUCH DAMAGE.
33 1.1 dyoung
34 1.1 dyoung ******************************************************************************/
35 1.145 msaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 1.99 msaitoh
37 1.1 dyoung /*
38 1.1 dyoung * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 1.1 dyoung * All rights reserved.
40 1.1 dyoung *
41 1.1 dyoung * This code is derived from software contributed to The NetBSD Foundation
42 1.1 dyoung * by Coyote Point Systems, Inc.
43 1.1 dyoung *
44 1.1 dyoung * Redistribution and use in source and binary forms, with or without
45 1.1 dyoung * modification, are permitted provided that the following conditions
46 1.1 dyoung * are met:
47 1.1 dyoung * 1. Redistributions of source code must retain the above copyright
48 1.1 dyoung * notice, this list of conditions and the following disclaimer.
49 1.1 dyoung * 2. Redistributions in binary form must reproduce the above copyright
50 1.1 dyoung * notice, this list of conditions and the following disclaimer in the
51 1.1 dyoung * documentation and/or other materials provided with the distribution.
52 1.1 dyoung *
53 1.1 dyoung * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 1.1 dyoung * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 1.1 dyoung * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 1.1 dyoung * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 1.1 dyoung * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 1.1 dyoung * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 1.1 dyoung * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 1.1 dyoung * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 1.1 dyoung * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 1.1 dyoung * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 1.1 dyoung * POSSIBILITY OF SUCH DAMAGE.
64 1.1 dyoung */
65 1.1 dyoung
66 1.281 msaitoh #include <sys/cdefs.h>
67 1.346 msaitoh __KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.346 2023/11/02 05:07:57 msaitoh Exp $");
68 1.281 msaitoh
69 1.80 msaitoh #ifdef _KERNEL_OPT
70 1.1 dyoung #include "opt_inet.h"
71 1.22 msaitoh #include "opt_inet6.h"
72 1.80 msaitoh #include "opt_net_mpsafe.h"
73 1.80 msaitoh #endif
74 1.1 dyoung
75 1.1 dyoung #include "ixgbe.h"
76 1.251 msaitoh #include "ixgbe_phy.h"
77 1.135 msaitoh #include "ixgbe_sriov.h"
78 1.1 dyoung
79 1.33 msaitoh #include <sys/cprng.h>
80 1.95 msaitoh #include <dev/mii/mii.h>
81 1.95 msaitoh #include <dev/mii/miivar.h>
82 1.33 msaitoh
83 1.99 msaitoh /************************************************************************
84 1.99 msaitoh * Driver version
85 1.99 msaitoh ************************************************************************/
86 1.159 maxv static const char ixgbe_driver_version[] = "4.0.1-k";
87 1.301 msaitoh /* XXX NetBSD: + 3.3.24 */
88 1.1 dyoung
89 1.99 msaitoh /************************************************************************
90 1.99 msaitoh * PCI Device ID Table
91 1.1 dyoung *
92 1.99 msaitoh * Used by probe to select devices to load on
93 1.99 msaitoh * Last field stores an index into ixgbe_strings
94 1.99 msaitoh * Last entry must be all 0s
95 1.1 dyoung *
96 1.99 msaitoh * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 1.99 msaitoh ************************************************************************/
98 1.159 maxv static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
99 1.1 dyoung {
100 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
101 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
102 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
103 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
104 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
105 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
106 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
107 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
108 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
109 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
110 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
111 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
112 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
113 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
114 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
115 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
116 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
117 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
118 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
119 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
120 1.334 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
121 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
122 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
123 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
124 1.1 dyoung {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
125 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
126 1.21 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
127 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
128 1.24 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
129 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
130 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
131 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
132 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
133 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
134 1.43 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
135 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
136 1.48 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
137 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
138 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
139 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
140 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
141 1.188 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
142 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
143 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
144 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
145 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
146 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
147 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
148 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
149 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
150 1.99 msaitoh {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
151 1.1 dyoung /* required last entry */
152 1.1 dyoung {0, 0, 0, 0, 0}
153 1.1 dyoung };
154 1.1 dyoung
155 1.99 msaitoh /************************************************************************
156 1.99 msaitoh * Table of branding strings
157 1.99 msaitoh ************************************************************************/
158 1.1 dyoung static const char *ixgbe_strings[] = {
159 1.1 dyoung "Intel(R) PRO/10GbE PCI-Express Network Driver"
160 1.1 dyoung };
161 1.1 dyoung
162 1.99 msaitoh /************************************************************************
163 1.99 msaitoh * Function prototypes
164 1.99 msaitoh ************************************************************************/
165 1.186 msaitoh static int ixgbe_probe(device_t, cfdata_t, void *);
166 1.333 msaitoh static void ixgbe_quirks(struct ixgbe_softc *);
167 1.186 msaitoh static void ixgbe_attach(device_t, device_t, void *);
168 1.186 msaitoh static int ixgbe_detach(device_t, int);
169 1.1 dyoung #if 0
170 1.186 msaitoh static int ixgbe_shutdown(device_t);
171 1.1 dyoung #endif
172 1.44 msaitoh static bool ixgbe_suspend(device_t, const pmf_qual_t *);
173 1.44 msaitoh static bool ixgbe_resume(device_t, const pmf_qual_t *);
174 1.98 msaitoh static int ixgbe_ifflags_cb(struct ethercom *);
175 1.186 msaitoh static int ixgbe_ioctl(struct ifnet *, u_long, void *);
176 1.1 dyoung static int ixgbe_init(struct ifnet *);
177 1.333 msaitoh static void ixgbe_init_locked(struct ixgbe_softc *);
178 1.232 msaitoh static void ixgbe_ifstop(struct ifnet *, int);
179 1.252 msaitoh static void ixgbe_stop_locked(void *);
180 1.333 msaitoh static void ixgbe_init_device_features(struct ixgbe_softc *);
181 1.333 msaitoh static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
182 1.333 msaitoh static void ixgbe_add_media_types(struct ixgbe_softc *);
183 1.186 msaitoh static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
184 1.186 msaitoh static int ixgbe_media_change(struct ifnet *);
185 1.333 msaitoh static int ixgbe_allocate_pci_resources(struct ixgbe_softc *,
186 1.1 dyoung const struct pci_attach_args *);
187 1.333 msaitoh static void ixgbe_free_deferred_handlers(struct ixgbe_softc *);
188 1.333 msaitoh static void ixgbe_get_slot_info(struct ixgbe_softc *);
189 1.333 msaitoh static int ixgbe_allocate_msix(struct ixgbe_softc *,
190 1.1 dyoung const struct pci_attach_args *);
191 1.333 msaitoh static int ixgbe_allocate_legacy(struct ixgbe_softc *,
192 1.1 dyoung const struct pci_attach_args *);
193 1.333 msaitoh static int ixgbe_configure_interrupts(struct ixgbe_softc *);
194 1.333 msaitoh static void ixgbe_free_pciintr_resources(struct ixgbe_softc *);
195 1.333 msaitoh static void ixgbe_free_pci_resources(struct ixgbe_softc *);
196 1.1 dyoung static void ixgbe_local_timer(void *);
197 1.233 msaitoh static void ixgbe_handle_timer(struct work *, void *);
198 1.186 msaitoh static void ixgbe_recovery_mode_timer(void *);
199 1.233 msaitoh static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
200 1.333 msaitoh static int ixgbe_setup_interface(device_t, struct ixgbe_softc *);
201 1.333 msaitoh static void ixgbe_config_gpie(struct ixgbe_softc *);
202 1.333 msaitoh static void ixgbe_config_dmac(struct ixgbe_softc *);
203 1.333 msaitoh static void ixgbe_config_delay_values(struct ixgbe_softc *);
204 1.333 msaitoh static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
205 1.333 msaitoh static void ixgbe_config_link(struct ixgbe_softc *);
206 1.333 msaitoh static void ixgbe_check_wol_support(struct ixgbe_softc *);
207 1.333 msaitoh static int ixgbe_setup_low_power_mode(struct ixgbe_softc *);
208 1.161 kamil #if 0
209 1.333 msaitoh static void ixgbe_rearm_queues(struct ixgbe_softc *, u64);
210 1.161 kamil #endif
211 1.1 dyoung
212 1.333 msaitoh static void ixgbe_initialize_transmit_units(struct ixgbe_softc *);
213 1.333 msaitoh static void ixgbe_initialize_receive_units(struct ixgbe_softc *);
214 1.333 msaitoh static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
215 1.333 msaitoh static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
216 1.333 msaitoh static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
217 1.333 msaitoh
218 1.333 msaitoh static void ixgbe_enable_intr(struct ixgbe_softc *);
219 1.333 msaitoh static void ixgbe_disable_intr(struct ixgbe_softc *);
220 1.333 msaitoh static void ixgbe_update_stats_counters(struct ixgbe_softc *);
221 1.333 msaitoh static void ixgbe_set_rxfilter(struct ixgbe_softc *);
222 1.333 msaitoh static void ixgbe_update_link_status(struct ixgbe_softc *);
223 1.333 msaitoh static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
224 1.333 msaitoh static void ixgbe_configure_ivars(struct ixgbe_softc *);
225 1.1 dyoung static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
226 1.333 msaitoh static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
227 1.1 dyoung
228 1.333 msaitoh static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
229 1.333 msaitoh static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
230 1.193 msaitoh static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
231 1.333 msaitoh static int ixgbe_register_vlan(struct ixgbe_softc *, u16);
232 1.333 msaitoh static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16);
233 1.1 dyoung
234 1.333 msaitoh static void ixgbe_add_device_sysctls(struct ixgbe_softc *);
235 1.333 msaitoh static void ixgbe_add_hw_stats(struct ixgbe_softc *);
236 1.333 msaitoh static void ixgbe_clear_evcnt(struct ixgbe_softc *);
237 1.333 msaitoh static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238 1.333 msaitoh static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239 1.333 msaitoh static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 1.44 msaitoh
241 1.44 msaitoh /* Sysctl handlers */
242 1.52 msaitoh static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243 1.52 msaitoh static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244 1.186 msaitoh static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245 1.44 msaitoh static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246 1.44 msaitoh static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247 1.44 msaitoh static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248 1.48 msaitoh #ifdef IXGBE_DEBUG
249 1.48 msaitoh static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250 1.48 msaitoh static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251 1.48 msaitoh #endif
252 1.186 msaitoh static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253 1.287 msaitoh static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
254 1.186 msaitoh static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
255 1.186 msaitoh static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
256 1.186 msaitoh static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
257 1.186 msaitoh static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
258 1.186 msaitoh static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
259 1.158 msaitoh static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
260 1.286 msaitoh static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
261 1.313 msaitoh static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
262 1.313 msaitoh static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
263 1.44 msaitoh static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
264 1.44 msaitoh static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
265 1.1 dyoung
266 1.277 msaitoh /* Interrupt functions */
267 1.34 msaitoh static int ixgbe_msix_que(void *);
268 1.233 msaitoh static int ixgbe_msix_admin(void *);
269 1.333 msaitoh static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
270 1.277 msaitoh static int ixgbe_legacy_irq(void *);
271 1.1 dyoung
272 1.233 msaitoh /* Event handlers running on workqueue */
273 1.1 dyoung static void ixgbe_handle_que(void *);
274 1.1 dyoung static void ixgbe_handle_link(void *);
275 1.233 msaitoh static void ixgbe_handle_msf(void *);
276 1.273 msaitoh static void ixgbe_handle_mod(void *, bool);
277 1.44 msaitoh static void ixgbe_handle_phy(void *);
278 1.1 dyoung
279 1.233 msaitoh /* Deferred workqueue handlers */
280 1.233 msaitoh static void ixgbe_handle_admin(struct work *, void *);
281 1.128 knakahar static void ixgbe_handle_que_work(struct work *, void *);
282 1.128 knakahar
283 1.159 maxv static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
284 1.1 dyoung
285 1.99 msaitoh /************************************************************************
286 1.99 msaitoh * NetBSD Device Interface Entry Points
287 1.99 msaitoh ************************************************************************/
288 1.333 msaitoh CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
289 1.1 dyoung ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
290 1.1 dyoung DVF_DETACH_SHUTDOWN);
291 1.1 dyoung
292 1.1 dyoung #if 0
293 1.44 msaitoh devclass_t ix_devclass;
294 1.44 msaitoh DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
295 1.1 dyoung
296 1.44 msaitoh MODULE_DEPEND(ix, pci, 1, 1, 1);
297 1.44 msaitoh MODULE_DEPEND(ix, ether, 1, 1, 1);
298 1.115 msaitoh #ifdef DEV_NETMAP
299 1.115 msaitoh MODULE_DEPEND(ix, netmap, 1, 1, 1);
300 1.115 msaitoh #endif
301 1.1 dyoung #endif
302 1.1 dyoung
303 1.1 dyoung /*
304 1.99 msaitoh * TUNEABLE PARAMETERS:
305 1.99 msaitoh */
306 1.1 dyoung
307 1.1 dyoung /*
308 1.99 msaitoh  * AIM: Adaptive Interrupt Moderation,
309 1.99 msaitoh  * which means that the interrupt rate
310 1.99 msaitoh  * is varied over time based on the
311 1.99 msaitoh  * traffic for that interrupt vector.
312 1.99 msaitoh */
313 1.73 msaitoh static bool ixgbe_enable_aim = true;
314 1.52 msaitoh #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
315 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
316 1.52 msaitoh "Enable adaptive interrupt moderation");
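/*
 * Note: SYSCTL_INT() is defined away above, so on NetBSD these
 * FreeBSD-style declarations only document the defaults and the
 * variables act as compile-time settings.  Run-time tuning is done
 * through the per-device sysctl handlers declared earlier in this file.
 */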
317 1.1 dyoung
318 1.22 msaitoh static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
319 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
320 1.52 msaitoh &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
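/*
 * The default above is the "low latency" interrupt rate: with
 * IXGBE_LOW_LATENCY commonly defined as 128 in ixgbe.h,
 * 4000000 / 128 gives 31250 interrupts per second per vector
 * (the exact figure depends on the IXGBE_LOW_LATENCY definition in use).
 */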
321 1.1 dyoung
322 1.1 dyoung /* How many packets rxeof tries to clean at a time */
323 1.1 dyoung static int ixgbe_rx_process_limit = 256;
324 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
325 1.99 msaitoh &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
326 1.1 dyoung
327 1.28 msaitoh /* How many packets txeof tries to clean at a time */
328 1.28 msaitoh static int ixgbe_tx_process_limit = 256;
329 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
330 1.52 msaitoh &ixgbe_tx_process_limit, 0,
331 1.99 msaitoh "Maximum number of sent packets to process at a time, -1 means unlimited");
332 1.52 msaitoh
333 1.52 msaitoh /* Flow control setting, default to full */
334 1.52 msaitoh static int ixgbe_flow_control = ixgbe_fc_full;
335 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
336 1.52 msaitoh &ixgbe_flow_control, 0, "Default flow control used for all adapters");
337 1.52 msaitoh
338 1.179 msaitoh /* Whether packet processing uses a workqueue or softint */
339 1.128 knakahar static bool ixgbe_txrx_workqueue = false;
340 1.128 knakahar
341 1.1 dyoung /*
342 1.99 msaitoh  * Smart speed setting, default to on.
343 1.99 msaitoh  * This only works as a compile option
344 1.99 msaitoh  * right now as it's set during attach;
345 1.99 msaitoh  * set this to 'ixgbe_smart_speed_off'
346 1.99 msaitoh  * to disable.
347 1.99 msaitoh */
348 1.1 dyoung static int ixgbe_smart_speed = ixgbe_smart_speed_on;
349 1.1 dyoung
350 1.1 dyoung /*
351 1.99 msaitoh * MSI-X should be the default for best performance,
352 1.1 dyoung * but this allows it to be forced off for testing.
353 1.1 dyoung */
354 1.1 dyoung static int ixgbe_enable_msix = 1;
355 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
356 1.52 msaitoh "Enable MSI-X interrupts");
357 1.1 dyoung
358 1.1 dyoung /*
359 1.1 dyoung  * Number of queues; can be set to 0,
360 1.1 dyoung  * in which case it autoconfigures based on
361 1.1 dyoung  * the number of CPUs with a max of 8. This
362 1.220 pgoyette * can be overridden manually here.
363 1.1 dyoung */
364 1.62 msaitoh static int ixgbe_num_queues = 0;
365 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
366 1.52 msaitoh "Number of queues to configure, 0 indicates autoconfigure");
367 1.1 dyoung
368 1.1 dyoung /*
369 1.99 msaitoh  * Number of TX descriptors per ring;
370 1.99 msaitoh  * set higher than RX as this seems to be
371 1.99 msaitoh  * the better-performing choice.
372 1.99 msaitoh */
373 1.335 msaitoh static int ixgbe_txd = DEFAULT_TXD;
374 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
375 1.52 msaitoh "Number of transmit descriptors per queue");
376 1.1 dyoung
377 1.1 dyoung /* Number of RX descriptors per ring */
378 1.335 msaitoh static int ixgbe_rxd = DEFAULT_RXD;
379 1.52 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
380 1.52 msaitoh "Number of receive descriptors per queue");
381 1.33 msaitoh
382 1.33 msaitoh /*
383 1.99 msaitoh  * Setting this allows the use
384 1.99 msaitoh  * of unsupported SFP+ modules; note that
385 1.99 msaitoh  * in doing so you are on your own :)
386 1.99 msaitoh */
387 1.35 msaitoh static int allow_unsupported_sfp = false;
388 1.52 msaitoh #define TUNABLE_INT(__x, __y)
389 1.52 msaitoh TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
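/*
 * TUNABLE_INT() is likewise a no-op stub here (see the #define above),
 * so allow_unsupported_sfp is effectively a compile-time option on NetBSD.
 */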
390 1.1 dyoung
391 1.99 msaitoh /*
392 1.99 msaitoh * Not sure if Flow Director is fully baked,
393 1.99 msaitoh * so we'll default to turning it off.
394 1.99 msaitoh */
395 1.99 msaitoh static int ixgbe_enable_fdir = 0;
396 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
397 1.99 msaitoh "Enable Flow Director");
398 1.99 msaitoh
399 1.99 msaitoh /* Legacy Transmit (single queue) */
400 1.99 msaitoh static int ixgbe_enable_legacy_tx = 0;
401 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
402 1.99 msaitoh &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
403 1.99 msaitoh
404 1.99 msaitoh /* Receive-Side Scaling */
405 1.99 msaitoh static int ixgbe_enable_rss = 1;
406 1.99 msaitoh SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
407 1.99 msaitoh "Enable Receive-Side Scaling (RSS)");
408 1.99 msaitoh
409 1.99 msaitoh #if 0
410 1.99 msaitoh static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
411 1.99 msaitoh static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
412 1.1 dyoung #endif
413 1.1 dyoung
414 1.80 msaitoh #ifdef NET_MPSAFE
415 1.80 msaitoh #define IXGBE_MPSAFE 1
416 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
417 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
418 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
419 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
420 1.80 msaitoh #else
421 1.80 msaitoh #define IXGBE_CALLOUT_FLAGS 0
422 1.229 msaitoh #define IXGBE_SOFTINT_FLAGS 0
423 1.128 knakahar #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
424 1.223 thorpej #define IXGBE_TASKLET_WQ_FLAGS 0
425 1.80 msaitoh #endif
426 1.128 knakahar #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
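/*
 * With NET_MPSAFE, the callout, softint and workqueues used by this driver
 * are created MP-safe so their handlers run without the kernel lock;
 * otherwise the big-lock variants are used.
 */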
427 1.80 msaitoh
428 1.312 msaitoh /* Interval between reports of errors */
429 1.312 msaitoh static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */
430 1.312 msaitoh
431 1.99 msaitoh /************************************************************************
432 1.99 msaitoh * ixgbe_initialize_rss_mapping
433 1.99 msaitoh ************************************************************************/
434 1.98 msaitoh static void
435 1.333 msaitoh ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
436 1.1 dyoung {
437 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
438 1.186 msaitoh u32 reta = 0, mrqc, rss_key[10];
439 1.186 msaitoh int queue_id, table_size, index_mult;
440 1.186 msaitoh int i, j;
441 1.186 msaitoh u32 rss_hash_config;
442 1.99 msaitoh
443 1.122 knakahar 	/* Force use of the default RSS key. */
444 1.122 knakahar #ifdef __NetBSD__
445 1.122 knakahar rss_getkey((uint8_t *) &rss_key);
446 1.122 knakahar #else
447 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
448 1.99 msaitoh /* Fetch the configured RSS key */
449 1.99 msaitoh rss_getkey((uint8_t *) &rss_key);
450 1.99 msaitoh } else {
451 1.99 msaitoh /* set up random bits */
452 1.99 msaitoh cprng_fast(&rss_key, sizeof(rss_key));
453 1.99 msaitoh }
454 1.122 knakahar #endif
455 1.1 dyoung
456 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
457 1.98 msaitoh index_mult = 0x1;
458 1.98 msaitoh table_size = 128;
459 1.333 msaitoh switch (sc->hw.mac.type) {
460 1.98 msaitoh case ixgbe_mac_82598EB:
461 1.98 msaitoh index_mult = 0x11;
462 1.98 msaitoh break;
463 1.98 msaitoh case ixgbe_mac_X550:
464 1.98 msaitoh case ixgbe_mac_X550EM_x:
465 1.99 msaitoh case ixgbe_mac_X550EM_a:
466 1.98 msaitoh table_size = 512;
467 1.98 msaitoh break;
468 1.98 msaitoh default:
469 1.98 msaitoh break;
470 1.98 msaitoh }
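	/*
	 * The table is written four one-byte entries per 32-bit register:
	 * entries 0-127 land in RETA(0..31) and, on the X550 class with a
	 * 512-entry table, entries 128-511 land in ERETA(0..95), which is
	 * what the "i < 128" test in the loop below selects.
	 */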
471 1.1 dyoung
472 1.98 msaitoh /* Set up the redirection table */
473 1.99 msaitoh for (i = 0, j = 0; i < table_size; i++, j++) {
474 1.333 msaitoh if (j == sc->num_queues)
475 1.99 msaitoh j = 0;
476 1.99 msaitoh
477 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
478 1.99 msaitoh /*
479 1.99 msaitoh * Fetch the RSS bucket id for the given indirection
480 1.99 msaitoh * entry. Cap it at the number of configured buckets
481 1.99 msaitoh * (which is num_queues.)
482 1.99 msaitoh */
483 1.99 msaitoh queue_id = rss_get_indirection_to_bucket(i);
484 1.333 msaitoh queue_id = queue_id % sc->num_queues;
485 1.99 msaitoh } else
486 1.99 msaitoh queue_id = (j * index_mult);
487 1.99 msaitoh
488 1.98 msaitoh /*
489 1.98 msaitoh * The low 8 bits are for hash value (n+0);
490 1.98 msaitoh * The next 8 bits are for hash value (n+1), etc.
491 1.98 msaitoh */
492 1.98 msaitoh reta = reta >> 8;
493 1.98 msaitoh reta = reta | (((uint32_t) queue_id) << 24);
494 1.98 msaitoh if ((i & 3) == 3) {
495 1.98 msaitoh if (i < 128)
496 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
497 1.98 msaitoh else
498 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
499 1.99 msaitoh reta);
500 1.98 msaitoh reta = 0;
501 1.98 msaitoh }
502 1.98 msaitoh }
503 1.1 dyoung
504 1.98 msaitoh /* Now fill our hash function seeds */
505 1.99 msaitoh for (i = 0; i < 10; i++)
506 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
507 1.1 dyoung
508 1.98 msaitoh /* Perform hash on these packet types */
509 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
510 1.99 msaitoh rss_hash_config = rss_gethashconfig();
511 1.99 msaitoh else {
512 1.99 msaitoh /*
513 1.99 msaitoh * Disable UDP - IP fragments aren't currently being handled
514 1.99 msaitoh * and so we end up with a mix of 2-tuple and 4-tuple
515 1.99 msaitoh * traffic.
516 1.99 msaitoh */
517 1.99 msaitoh rss_hash_config = RSS_HASHTYPE_RSS_IPV4
518 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV4
519 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6
520 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6
521 1.186 msaitoh | RSS_HASHTYPE_RSS_IPV6_EX
522 1.186 msaitoh | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
523 1.99 msaitoh }
524 1.99 msaitoh
525 1.98 msaitoh mrqc = IXGBE_MRQC_RSSEN;
526 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
527 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
528 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
529 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
530 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
531 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
532 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
533 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
534 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
535 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
536 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
537 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
538 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
539 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
540 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
541 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
542 1.98 msaitoh if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
543 1.98 msaitoh mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
544 1.333 msaitoh mrqc |= ixgbe_get_mrqc(sc->iov_mode);
545 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
546 1.99 msaitoh } /* ixgbe_initialize_rss_mapping */
547 1.1 dyoung
548 1.99 msaitoh /************************************************************************
549 1.99 msaitoh * ixgbe_initialize_receive_units - Setup receive registers and features.
550 1.99 msaitoh ************************************************************************/
551 1.98 msaitoh #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
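/*
 * SRRCTL.BSIZEPKT is in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so
 * adding BSIZEPKT_ROUNDUP before the shift rounds the buffer size up rather
 * than down, e.g. (2048 + 1023) >> 10 = 2 (2 KB), (3000 + 1023) >> 10 = 3.
 */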
552 1.185 msaitoh
553 1.1 dyoung static void
554 1.333 msaitoh ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
555 1.1 dyoung {
556 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
557 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
558 1.333 msaitoh struct ifnet *ifp = sc->ifp;
559 1.186 msaitoh int i, j;
560 1.98 msaitoh u32 bufsz, fctrl, srrctl, rxcsum;
561 1.98 msaitoh u32 hlreg;
562 1.98 msaitoh
563 1.98 msaitoh /*
564 1.98 msaitoh * Make sure receives are disabled while
565 1.98 msaitoh * setting up the descriptor ring
566 1.98 msaitoh */
567 1.98 msaitoh ixgbe_disable_rx(hw);
568 1.1 dyoung
569 1.98 msaitoh /* Enable broadcasts */
570 1.98 msaitoh fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
571 1.98 msaitoh fctrl |= IXGBE_FCTRL_BAM;
572 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB) {
573 1.98 msaitoh fctrl |= IXGBE_FCTRL_DPF;
574 1.98 msaitoh fctrl |= IXGBE_FCTRL_PMCF;
575 1.98 msaitoh }
576 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
577 1.1 dyoung
578 1.98 msaitoh /* Set for Jumbo Frames? */
579 1.98 msaitoh hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
580 1.98 msaitoh if (ifp->if_mtu > ETHERMTU)
581 1.98 msaitoh hlreg |= IXGBE_HLREG0_JUMBOEN;
582 1.98 msaitoh else
583 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_JUMBOEN;
584 1.99 msaitoh
585 1.98 msaitoh #ifdef DEV_NETMAP
586 1.99 msaitoh /* CRC stripping is conditional in Netmap */
587 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
588 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP) &&
589 1.99 msaitoh !ix_crcstrip)
590 1.98 msaitoh hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
591 1.60 msaitoh else
592 1.99 msaitoh #endif /* DEV_NETMAP */
593 1.98 msaitoh hlreg |= IXGBE_HLREG0_RXCRCSTRP;
594 1.99 msaitoh
595 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
596 1.1 dyoung
597 1.333 msaitoh bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
598 1.99 msaitoh IXGBE_SRRCTL_BSIZEPKT_SHIFT;
599 1.1 dyoung
600 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++) {
601 1.98 msaitoh u64 rdba = rxr->rxdma.dma_paddr;
602 1.152 msaitoh u32 reg;
603 1.98 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
604 1.98 msaitoh 		int regshift = i % 4;	/* 8 bits per queue */
605 1.99 msaitoh j = rxr->me;
606 1.1 dyoung
607 1.98 msaitoh /* Setup the Base and Length of the Rx Descriptor Ring */
608 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
609 1.98 msaitoh (rdba & 0x00000000ffffffffULL));
610 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
611 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
612 1.333 msaitoh sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
613 1.1 dyoung
614 1.98 msaitoh /* Set up the SRRCTL register */
615 1.98 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
616 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
617 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
618 1.98 msaitoh srrctl |= bufsz;
619 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
620 1.47 msaitoh
621 1.98 msaitoh /* Set RQSMR (Receive Queue Statistic Mapping) register */
622 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
623 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
624 1.98 msaitoh reg |= i << (regshift * 8);
625 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
626 1.98 msaitoh
627 1.98 msaitoh /*
628 1.98 msaitoh * Set DROP_EN iff we have no flow control and >1 queue.
629 1.98 msaitoh * Note that srrctl was cleared shortly before during reset,
630 1.98 msaitoh * so we do not need to clear the bit, but do it just in case
631 1.98 msaitoh * this code is moved elsewhere.
632 1.98 msaitoh */
633 1.333 msaitoh if ((sc->num_queues > 1) &&
634 1.333 msaitoh (sc->hw.fc.requested_mode == ixgbe_fc_none))
635 1.98 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
636 1.319 msaitoh else
637 1.98 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
638 1.98 msaitoh
639 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
640 1.98 msaitoh
641 1.98 msaitoh /* Setup the HW Rx Head and Tail Descriptor Pointers */
642 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
643 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
644 1.98 msaitoh
645 1.98 msaitoh /* Set the driver rx tail address */
646 1.98 msaitoh rxr->tail = IXGBE_RDT(rxr->me);
647 1.98 msaitoh }
648 1.98 msaitoh
649 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
650 1.99 msaitoh u32 psrtype = IXGBE_PSRTYPE_TCPHDR
651 1.186 msaitoh | IXGBE_PSRTYPE_UDPHDR
652 1.186 msaitoh | IXGBE_PSRTYPE_IPV4HDR
653 1.186 msaitoh | IXGBE_PSRTYPE_IPV6HDR;
654 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
655 1.98 msaitoh }
656 1.98 msaitoh
657 1.98 msaitoh rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
658 1.98 msaitoh
659 1.333 msaitoh ixgbe_initialize_rss_mapping(sc);
660 1.98 msaitoh
661 1.333 msaitoh if (sc->num_queues > 1) {
662 1.98 msaitoh /* RSS and RX IPP Checksum are mutually exclusive */
663 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
664 1.98 msaitoh }
665 1.98 msaitoh
666 1.98 msaitoh if (ifp->if_capenable & IFCAP_RXCSUM)
667 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_PCSD;
668 1.98 msaitoh
669 1.98 msaitoh /* This is useful for calculating UDP/IP fragment checksums */
670 1.98 msaitoh if (!(rxcsum & IXGBE_RXCSUM_PCSD))
671 1.98 msaitoh rxcsum |= IXGBE_RXCSUM_IPPCSE;
672 1.98 msaitoh
673 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
674 1.98 msaitoh
675 1.99 msaitoh } /* ixgbe_initialize_receive_units */
676 1.98 msaitoh
677 1.99 msaitoh /************************************************************************
678 1.99 msaitoh * ixgbe_initialize_transmit_units - Enable transmit units.
679 1.99 msaitoh ************************************************************************/
680 1.98 msaitoh static void
681 1.333 msaitoh ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
682 1.98 msaitoh {
683 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
684 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
685 1.144 msaitoh int i;
686 1.98 msaitoh
687 1.225 msaitoh INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
688 1.225 msaitoh
689 1.98 msaitoh /* Setup the Base and Length of the Tx Descriptor Ring */
690 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, txr++) {
691 1.99 msaitoh u64 tdba = txr->txdma.dma_paddr;
692 1.99 msaitoh u32 txctrl = 0;
693 1.152 msaitoh u32 tqsmreg, reg;
694 1.152 msaitoh int regnum = i / 4; /* 1 register per 4 queues */
695 1.152 msaitoh 		int regshift = i % 4;	/* 8 bits per queue */
696 1.99 msaitoh int j = txr->me;
697 1.98 msaitoh
698 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
699 1.98 msaitoh (tdba & 0x00000000ffffffffULL));
700 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
701 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
702 1.333 msaitoh sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
703 1.98 msaitoh
704 1.152 msaitoh /*
705 1.152 msaitoh * Set TQSMR (Transmit Queue Statistic Mapping) register.
706 1.152 msaitoh * Register location is different between 82598 and others.
707 1.152 msaitoh */
708 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
709 1.152 msaitoh tqsmreg = IXGBE_TQSMR(regnum);
710 1.152 msaitoh else
711 1.152 msaitoh tqsmreg = IXGBE_TQSM(regnum);
712 1.152 msaitoh reg = IXGBE_READ_REG(hw, tqsmreg);
713 1.194 msaitoh reg &= ~(0x000000ffUL << (regshift * 8));
714 1.152 msaitoh reg |= i << (regshift * 8);
715 1.152 msaitoh IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 1.152 msaitoh
717 1.98 msaitoh /* Setup the HW Tx Head and Tail descriptor pointers */
718 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 1.98 msaitoh
721 1.98 msaitoh /* Cache the tail address */
722 1.98 msaitoh txr->tail = IXGBE_TDT(j);
723 1.98 msaitoh
724 1.155 msaitoh txr->txr_no_space = false;
725 1.155 msaitoh
726 1.345 msaitoh 		/* Disable relaxed ordering */
727 1.98 msaitoh /*
728 1.98 msaitoh * Note: for X550 series devices, these registers are actually
729 1.295 andvar * prefixed with TPH_ instead of DCA_, but the addresses and
730 1.98 msaitoh * fields remain the same.
731 1.98 msaitoh */
732 1.98 msaitoh switch (hw->mac.type) {
733 1.98 msaitoh case ixgbe_mac_82598EB:
734 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 1.98 msaitoh break;
736 1.98 msaitoh default:
737 1.98 msaitoh txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 1.98 msaitoh break;
739 1.98 msaitoh }
740 1.98 msaitoh txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
741 1.98 msaitoh switch (hw->mac.type) {
742 1.98 msaitoh case ixgbe_mac_82598EB:
743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
744 1.98 msaitoh break;
745 1.98 msaitoh default:
746 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
747 1.98 msaitoh break;
748 1.98 msaitoh }
749 1.98 msaitoh
750 1.98 msaitoh }
751 1.98 msaitoh
752 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
753 1.98 msaitoh u32 dmatxctl, rttdcs;
754 1.99 msaitoh
755 1.98 msaitoh dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
756 1.98 msaitoh dmatxctl |= IXGBE_DMATXCTL_TE;
757 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
758 1.98 msaitoh /* Disable arbiter to set MTQC */
759 1.98 msaitoh rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
760 1.98 msaitoh rttdcs |= IXGBE_RTTDCS_ARBDIS;
761 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC,
763 1.333 msaitoh ixgbe_get_mtqc(sc->iov_mode));
764 1.98 msaitoh rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
765 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
766 1.98 msaitoh }
767 1.99 msaitoh } /* ixgbe_initialize_transmit_units */
768 1.98 msaitoh
769 1.245 msaitoh static void
770 1.333 msaitoh ixgbe_quirks(struct ixgbe_softc *sc)
771 1.245 msaitoh {
772 1.333 msaitoh device_t dev = sc->dev;
773 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
774 1.245 msaitoh const char *vendor, *product;
775 1.245 msaitoh
776 1.248 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
777 1.248 msaitoh /*
778 1.248 msaitoh * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
779 1.248 msaitoh * MA10-ST0.
780 1.248 msaitoh */
781 1.248 msaitoh vendor = pmf_get_platform("system-vendor");
782 1.248 msaitoh product = pmf_get_platform("system-product");
783 1.245 msaitoh
784 1.248 msaitoh if ((vendor == NULL) || (product == NULL))
785 1.248 msaitoh return;
786 1.245 msaitoh
787 1.248 msaitoh if ((strcmp(vendor, "GIGABYTE") == 0) &&
788 1.248 msaitoh (strcmp(product, "MA10-ST0") == 0)) {
789 1.248 msaitoh aprint_verbose_dev(dev,
790 1.248 msaitoh "Enable SFP+ MOD_ABS inverse quirk\n");
791 1.333 msaitoh sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
792 1.248 msaitoh }
793 1.245 msaitoh }
794 1.245 msaitoh }
795 1.245 msaitoh
796 1.99 msaitoh /************************************************************************
797 1.99 msaitoh * ixgbe_attach - Device initialization routine
798 1.98 msaitoh *
799 1.99 msaitoh * Called when the driver is being loaded.
800 1.99 msaitoh * Identifies the type of hardware, allocates all resources
801 1.99 msaitoh * and initializes the hardware.
802 1.98 msaitoh *
803 1.99 msaitoh * return 0 on success, positive on failure
804 1.99 msaitoh ************************************************************************/
805 1.98 msaitoh static void
806 1.98 msaitoh ixgbe_attach(device_t parent, device_t dev, void *aux)
807 1.98 msaitoh {
808 1.333 msaitoh struct ixgbe_softc *sc;
809 1.98 msaitoh struct ixgbe_hw *hw;
810 1.186 msaitoh int error = -1;
811 1.98 msaitoh u32 ctrl_ext;
812 1.340 msaitoh u16 high, low, nvmreg, dev_caps;
813 1.99 msaitoh pcireg_t id, subid;
814 1.159 maxv const ixgbe_vendor_info_t *ent;
815 1.98 msaitoh struct pci_attach_args *pa = aux;
816 1.219 msaitoh bool unsupported_sfp = false;
817 1.98 msaitoh const char *str;
818 1.233 msaitoh char wqname[MAXCOMLEN];
819 1.99 msaitoh char buf[256];
820 1.98 msaitoh
821 1.98 msaitoh INIT_DEBUGOUT("ixgbe_attach: begin");
822 1.98 msaitoh
823 1.98 msaitoh /* Allocate, clear, and link in our adapter structure */
824 1.333 msaitoh sc = device_private(dev);
825 1.333 msaitoh sc->hw.back = sc;
826 1.333 msaitoh sc->dev = dev;
827 1.333 msaitoh hw = &sc->hw;
828 1.333 msaitoh sc->osdep.pc = pa->pa_pc;
829 1.333 msaitoh sc->osdep.tag = pa->pa_tag;
830 1.98 msaitoh if (pci_dma64_available(pa))
831 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat64;
832 1.98 msaitoh else
833 1.333 msaitoh sc->osdep.dmat = pa->pa_dmat;
834 1.333 msaitoh sc->osdep.attached = false;
835 1.333 msaitoh sc->osdep.detaching = false;
836 1.98 msaitoh
837 1.98 msaitoh ent = ixgbe_lookup(pa);
838 1.98 msaitoh
839 1.98 msaitoh KASSERT(ent != NULL);
840 1.98 msaitoh
841 1.98 msaitoh aprint_normal(": %s, Version - %s\n",
842 1.98 msaitoh ixgbe_strings[ent->index], ixgbe_driver_version);
843 1.98 msaitoh
844 1.233 msaitoh /* Core Lock Init */
845 1.333 msaitoh IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
846 1.1 dyoung
847 1.233 msaitoh /* Set up the timer callout and workqueue */
848 1.333 msaitoh callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
849 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
850 1.333 msaitoh error = workqueue_create(&sc->timer_wq, wqname,
851 1.333 msaitoh ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
852 1.233 msaitoh IXGBE_TASKLET_WQ_FLAGS);
853 1.233 msaitoh if (error) {
854 1.233 msaitoh aprint_error_dev(dev,
855 1.233 msaitoh "could not create timer workqueue (%d)\n", error);
856 1.233 msaitoh goto err_out;
857 1.233 msaitoh }
858 1.1 dyoung
859 1.1 dyoung /* Determine hardware revision */
860 1.99 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
861 1.99 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
862 1.99 msaitoh
863 1.99 msaitoh hw->vendor_id = PCI_VENDOR(id);
864 1.99 msaitoh hw->device_id = PCI_PRODUCT(id);
865 1.99 msaitoh hw->revision_id =
866 1.99 msaitoh PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
867 1.99 msaitoh hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
868 1.99 msaitoh hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
869 1.99 msaitoh
870 1.248 msaitoh /* Set quirk flags */
871 1.333 msaitoh ixgbe_quirks(sc);
872 1.248 msaitoh
873 1.99 msaitoh /*
874 1.99 msaitoh * Make sure BUSMASTER is set
875 1.99 msaitoh */
876 1.99 msaitoh ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
877 1.99 msaitoh
878 1.99 msaitoh /* Do base PCI setup - map BAR0 */
879 1.333 msaitoh if (ixgbe_allocate_pci_resources(sc, pa)) {
880 1.99 msaitoh aprint_error_dev(dev, "Allocation of PCI resources failed\n");
881 1.99 msaitoh error = ENXIO;
882 1.99 msaitoh goto err_out;
883 1.99 msaitoh }
884 1.99 msaitoh
885 1.99 msaitoh /* let hardware know driver is loaded */
886 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
887 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
888 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
889 1.99 msaitoh
890 1.99 msaitoh /*
891 1.99 msaitoh * Initialize the shared code
892 1.99 msaitoh */
893 1.144 msaitoh if (ixgbe_init_shared_code(hw) != 0) {
894 1.319 msaitoh aprint_error_dev(dev,
895 1.319 msaitoh "Unable to initialize the shared code\n");
896 1.99 msaitoh error = ENXIO;
897 1.99 msaitoh goto err_out;
898 1.99 msaitoh }
899 1.1 dyoung
900 1.79 msaitoh switch (hw->mac.type) {
901 1.79 msaitoh case ixgbe_mac_82598EB:
902 1.79 msaitoh str = "82598EB";
903 1.79 msaitoh break;
904 1.79 msaitoh case ixgbe_mac_82599EB:
905 1.79 msaitoh str = "82599EB";
906 1.79 msaitoh break;
907 1.79 msaitoh case ixgbe_mac_X540:
908 1.79 msaitoh str = "X540";
909 1.79 msaitoh break;
910 1.79 msaitoh case ixgbe_mac_X550:
911 1.79 msaitoh str = "X550";
912 1.79 msaitoh break;
913 1.79 msaitoh case ixgbe_mac_X550EM_x:
914 1.246 msaitoh str = "X550EM X";
915 1.79 msaitoh break;
916 1.99 msaitoh case ixgbe_mac_X550EM_a:
917 1.99 msaitoh str = "X550EM A";
918 1.99 msaitoh break;
919 1.79 msaitoh default:
920 1.79 msaitoh str = "Unknown";
921 1.79 msaitoh break;
922 1.79 msaitoh }
923 1.79 msaitoh aprint_normal_dev(dev, "device %s\n", str);
924 1.79 msaitoh
925 1.99 msaitoh hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 1.99 msaitoh
927 1.99 msaitoh /* Pick up the 82599 settings */
928 1.292 msaitoh if (hw->mac.type != ixgbe_mac_82598EB)
929 1.99 msaitoh hw->phy.smart_speed = ixgbe_smart_speed;
930 1.292 msaitoh
931 1.292 msaitoh /* Set the right number of segments */
932 1.292 msaitoh KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
933 1.333 msaitoh sc->num_segs = IXGBE_SCATTER_DEFAULT;
934 1.99 msaitoh
935 1.172 msaitoh /* Ensure SW/FW semaphore is free */
936 1.172 msaitoh ixgbe_init_swfw_semaphore(hw);
937 1.172 msaitoh
938 1.113 msaitoh hw->mac.ops.set_lan_id(hw);
939 1.333 msaitoh ixgbe_init_device_features(sc);
940 1.99 msaitoh
941 1.333 msaitoh if (ixgbe_configure_interrupts(sc)) {
942 1.1 dyoung error = ENXIO;
943 1.1 dyoung goto err_out;
944 1.1 dyoung }
945 1.1 dyoung
946 1.99 msaitoh /* Allocate multicast array memory. */
947 1.333 msaitoh sc->mta = malloc(sizeof(*sc->mta) *
948 1.215 chs MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949 1.99 msaitoh
950 1.99 msaitoh /* Enable WoL (if supported) */
951 1.333 msaitoh ixgbe_check_wol_support(sc);
952 1.99 msaitoh
953 1.193 msaitoh /* Register for VLAN events */
954 1.333 msaitoh ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);
955 1.193 msaitoh
956 1.99 msaitoh /* Verify adapter fan is still functional (if applicable) */
957 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 1.99 msaitoh u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 1.333 msaitoh ixgbe_check_fan_failure(sc, esdp, FALSE);
960 1.99 msaitoh }
961 1.99 msaitoh
962 1.99 msaitoh /* Set an initial default flow control value */
963 1.99 msaitoh hw->fc.requested_mode = ixgbe_flow_control;
964 1.99 msaitoh
965 1.1 dyoung /* Do descriptor calc and sanity checks */
966 1.1 dyoung if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 1.1 dyoung ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 1.336 msaitoh aprint_error_dev(dev, "Invalid TX ring size (%d). "
969 1.336 msaitoh "It must be between %d and %d, "
970 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
971 1.336 msaitoh "Using default value of %d instead.\n",
972 1.336 msaitoh ixgbe_txd, MIN_TXD, MAX_TXD,
973 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
974 1.336 msaitoh DEFAULT_TXD);
975 1.333 msaitoh sc->num_tx_desc = DEFAULT_TXD;
976 1.1 dyoung } else
977 1.333 msaitoh sc->num_tx_desc = ixgbe_txd;
978 1.1 dyoung
979 1.1 dyoung if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 1.33 msaitoh ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 1.336 msaitoh aprint_error_dev(dev, "Invalid RX ring size (%d). "
982 1.336 msaitoh "It must be between %d and %d, "
983 1.336 msaitoh "inclusive, and must be a multiple of %zu. "
984 1.336 msaitoh "Using default value of %d instead.\n",
985 1.336 msaitoh ixgbe_rxd, MIN_RXD, MAX_RXD,
986 1.336 msaitoh DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
987 1.336 msaitoh DEFAULT_RXD);
988 1.333 msaitoh sc->num_rx_desc = DEFAULT_RXD;
989 1.1 dyoung } else
990 1.333 msaitoh sc->num_rx_desc = ixgbe_rxd;
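	/*
	 * Illustrative arithmetic for the checks above (assuming the usual
	 * 16-byte advanced descriptors and DBA_ALIGN of 128):
	 *   (ixgbe_txd * 16) % 128 == 0  <=>  ixgbe_txd % 8 == 0
	 * i.e. the ring sizes must be multiples of 8 descriptors and lie
	 * within [MIN_TXD, MAX_TXD] / [MIN_RXD, MAX_RXD].
	 */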
991 1.1 dyoung
992 1.313 msaitoh /* Sysctls for limiting the amount of work done in the taskqueues */
993 1.333 msaitoh sc->rx_process_limit
994 1.333 msaitoh = (ixgbe_rx_process_limit <= sc->num_rx_desc)
995 1.333 msaitoh ? ixgbe_rx_process_limit : sc->num_rx_desc;
996 1.333 msaitoh sc->tx_process_limit
997 1.333 msaitoh = (ixgbe_tx_process_limit <= sc->num_tx_desc)
998 1.333 msaitoh ? ixgbe_tx_process_limit : sc->num_tx_desc;
999 1.313 msaitoh
1000 1.286 msaitoh 	/* Set the default upper limit for copying mbufs in rxeof */
1001 1.333 msaitoh sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
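	/*
	 * The intent (see ixgbe_rxeof() in ix_txrx.c) is that received
	 * frames no longer than rx_copy_len are copied into a fresh small
	 * mbuf so the DMA-mapped receive cluster can stay on the ring;
	 * larger frames are passed up in the cluster itself.
	 */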
1002 1.286 msaitoh
1003 1.1 dyoung /* Allocate our TX/RX Queues */
1004 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1005 1.1 dyoung error = ENOMEM;
1006 1.1 dyoung goto err_out;
1007 1.1 dyoung }
1008 1.1 dyoung
1009 1.99 msaitoh hw->phy.reset_if_overtemp = TRUE;
1010 1.99 msaitoh error = ixgbe_reset_hw(hw);
1011 1.99 msaitoh hw->phy.reset_if_overtemp = FALSE;
1012 1.237 msaitoh if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1013 1.99 msaitoh error = IXGBE_SUCCESS;
1014 1.237 msaitoh else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1015 1.324 msaitoh aprint_error_dev(dev,
1016 1.324 msaitoh "Unsupported SFP+ module type was detected.\n");
1017 1.219 msaitoh unsupported_sfp = true;
1018 1.219 msaitoh error = IXGBE_SUCCESS;
1019 1.1 dyoung } else if (error) {
1020 1.282 msaitoh aprint_error_dev(dev,
1021 1.282 msaitoh "Hardware initialization failed(error = %d)\n", error);
1022 1.1 dyoung error = EIO;
1023 1.1 dyoung goto err_late;
1024 1.1 dyoung }
1025 1.1 dyoung
1026 1.1 dyoung /* Make sure we have a good EEPROM before we read from it */
1027 1.333 msaitoh if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1028 1.48 msaitoh aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1029 1.1 dyoung error = EIO;
1030 1.1 dyoung goto err_late;
1031 1.1 dyoung }
1032 1.1 dyoung
1033 1.88 msaitoh aprint_normal("%s:", device_xname(dev));
1034 1.88 msaitoh /* NVM Image Version */
1035 1.169 msaitoh high = low = 0;
1036 1.88 msaitoh switch (hw->mac.type) {
1037 1.300 msaitoh case ixgbe_mac_82598EB:
1038 1.300 msaitoh /*
1039 1.300 msaitoh * Print version from the dev starter version (0x29). The
1040 1.300 msaitoh 		 * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1041 1.300 msaitoh */
1042 1.300 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 1.300 msaitoh if (nvmreg == 0xffff)
1044 1.300 msaitoh break;
1045 1.300 msaitoh high = (nvmreg >> 12) & 0x0f;
1046 1.300 msaitoh low = (nvmreg >> 4) & 0xff;
1047 1.300 msaitoh id = nvmreg & 0x0f;
1048 1.300 msaitoh /*
1049 1.300 msaitoh * The following output might not be correct. Some 82598 cards
1050 1.300 msaitoh 		 * have 0x1070 or 0x2090. The 82598 spec update mentions version 2.9.0.
1051 1.300 msaitoh */
1052 1.300 msaitoh aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1053 1.300 msaitoh break;
1054 1.88 msaitoh case ixgbe_mac_X540:
1055 1.99 msaitoh case ixgbe_mac_X550EM_a:
1056 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1057 1.88 msaitoh if (nvmreg == 0xffff)
1058 1.88 msaitoh break;
1059 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1060 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1061 1.88 msaitoh id = nvmreg & 0x0f;
1062 1.107 msaitoh aprint_normal(" NVM Image Version %u.", high);
1063 1.107 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1064 1.107 msaitoh str = "%x";
1065 1.107 msaitoh else
1066 1.107 msaitoh str = "%02x";
1067 1.107 msaitoh aprint_normal(str, low);
1068 1.107 msaitoh aprint_normal(" ID 0x%x,", id);
1069 1.88 msaitoh break;
1070 1.88 msaitoh case ixgbe_mac_X550EM_x:
1071 1.88 msaitoh case ixgbe_mac_X550:
1072 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1073 1.88 msaitoh if (nvmreg == 0xffff)
1074 1.88 msaitoh break;
1075 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1076 1.88 msaitoh low = nvmreg & 0xff;
1077 1.107 msaitoh aprint_normal(" NVM Image Version %u.%02x,", high, low);
1078 1.88 msaitoh break;
1079 1.88 msaitoh default:
1080 1.88 msaitoh break;
1081 1.88 msaitoh }
1082 1.169 msaitoh hw->eeprom.nvm_image_ver_high = high;
1083 1.169 msaitoh hw->eeprom.nvm_image_ver_low = low;
1084 1.88 msaitoh
1085 1.88 msaitoh /* PHY firmware revision */
1086 1.88 msaitoh switch (hw->mac.type) {
1087 1.88 msaitoh case ixgbe_mac_X540:
1088 1.88 msaitoh case ixgbe_mac_X550:
1089 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1090 1.88 msaitoh if (nvmreg == 0xffff)
1091 1.88 msaitoh break;
1092 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1093 1.88 msaitoh low = (nvmreg >> 4) & 0xff;
1094 1.88 msaitoh id = nvmreg & 0x000f;
1095 1.114 msaitoh aprint_normal(" PHY FW Revision %u.", high);
1096 1.114 msaitoh if (hw->mac.type == ixgbe_mac_X540)
1097 1.114 msaitoh str = "%x";
1098 1.114 msaitoh else
1099 1.114 msaitoh str = "%02x";
1100 1.114 msaitoh aprint_normal(str, low);
1101 1.114 msaitoh aprint_normal(" ID 0x%x,", id);
1102 1.88 msaitoh break;
1103 1.88 msaitoh default:
1104 1.88 msaitoh break;
1105 1.88 msaitoh }
1106 1.88 msaitoh
1107 1.88 msaitoh /* NVM Map version & OEM NVM Image version */
1108 1.88 msaitoh switch (hw->mac.type) {
1109 1.88 msaitoh case ixgbe_mac_X550:
1110 1.88 msaitoh case ixgbe_mac_X550EM_x:
1111 1.99 msaitoh case ixgbe_mac_X550EM_a:
1112 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1113 1.88 msaitoh if (nvmreg != 0xffff) {
1114 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1115 1.88 msaitoh low = nvmreg & 0x00ff;
1116 1.88 msaitoh aprint_normal(" NVM Map version %u.%02x,", high, low);
1117 1.88 msaitoh }
1118 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1119 1.107 msaitoh if (nvmreg != 0xffff) {
1120 1.88 msaitoh high = (nvmreg >> 12) & 0x0f;
1121 1.88 msaitoh low = nvmreg & 0x00ff;
1122 1.88 msaitoh aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1123 1.88 msaitoh low);
1124 1.88 msaitoh }
1125 1.88 msaitoh break;
1126 1.88 msaitoh default:
1127 1.88 msaitoh break;
1128 1.88 msaitoh }
1129 1.88 msaitoh
1130 1.88 msaitoh /* Print the ETrackID */
1131 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1132 1.88 msaitoh hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1133 1.88 msaitoh aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1134 1.79 msaitoh
1135 1.307 msaitoh 	/* Print the Printed Board Assembly (PBA) number */
1136 1.307 msaitoh error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
1137 1.307 msaitoh aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
1138 1.307 msaitoh
1139 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
1140 1.333 msaitoh error = ixgbe_allocate_msix(sc, pa);
1141 1.119 msaitoh if (error) {
1142 1.119 msaitoh /* Free allocated queue structures first */
1143 1.333 msaitoh ixgbe_free_queues(sc);
1144 1.119 msaitoh
1145 1.119 msaitoh /* Fallback to legacy interrupt */
1146 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_MSI)
1147 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
1148 1.333 msaitoh sc->num_queues = 1;
1149 1.119 msaitoh
1150 1.119 msaitoh /* Allocate our TX/RX Queues again */
1151 1.333 msaitoh if (ixgbe_allocate_queues(sc)) {
1152 1.119 msaitoh error = ENOMEM;
1153 1.119 msaitoh goto err_out;
1154 1.119 msaitoh }
1155 1.119 msaitoh }
1156 1.119 msaitoh }
1157 1.307 msaitoh
1158 1.169 msaitoh /* Recovery mode */
1159 1.333 msaitoh switch (sc->hw.mac.type) {
1160 1.169 msaitoh case ixgbe_mac_X550:
1161 1.169 msaitoh case ixgbe_mac_X550EM_x:
1162 1.169 msaitoh case ixgbe_mac_X550EM_a:
1163 1.169 msaitoh /* >= 2.00 */
1164 1.169 msaitoh if (hw->eeprom.nvm_image_ver_high >= 2) {
1165 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1166 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1167 1.169 msaitoh }
1168 1.169 msaitoh break;
1169 1.169 msaitoh default:
1170 1.169 msaitoh break;
1171 1.169 msaitoh }
1172 1.169 msaitoh
1173 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) == 0)
1174 1.333 msaitoh error = ixgbe_allocate_legacy(sc, pa);
1175 1.185 msaitoh if (error)
1176 1.99 msaitoh goto err_late;
1177 1.99 msaitoh
1178 1.119 msaitoh /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1179 1.333 msaitoh mutex_init(&(sc)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1180 1.233 msaitoh snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1181 1.333 msaitoh error = workqueue_create(&sc->admin_wq, wqname,
1182 1.333 msaitoh ixgbe_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
1183 1.223 thorpej IXGBE_TASKLET_WQ_FLAGS);
1184 1.223 thorpej if (error) {
1185 1.223 thorpej aprint_error_dev(dev,
1186 1.233 msaitoh "could not create admin workqueue (%d)\n", error);
1187 1.223 thorpej goto err_out;
1188 1.223 thorpej }
1189 1.119 msaitoh
1190 1.99 msaitoh error = ixgbe_start_hw(hw);
1191 1.25 msaitoh switch (error) {
1192 1.25 msaitoh case IXGBE_ERR_EEPROM_VERSION:
1193 1.319 msaitoh aprint_error_dev(dev,
1194 1.319 msaitoh "This device is a pre-production adapter/"
1195 1.1 dyoung "LOM. Please be aware there may be issues associated "
1196 1.48 msaitoh "with your hardware.\nIf you are experiencing problems "
1197 1.1 dyoung "please contact your Intel or hardware representative "
1198 1.1 dyoung "who provided you with this hardware.\n");
1199 1.25 msaitoh break;
1200 1.25 msaitoh default:
1201 1.25 msaitoh break;
1202 1.1 dyoung }
1203 1.1 dyoung
1204 1.116 msaitoh /* Setup OS specific network interface */
1205 1.333 msaitoh if (ixgbe_setup_interface(dev, sc) != 0)
1206 1.116 msaitoh goto err_late;
1207 1.116 msaitoh
1208 1.110 msaitoh /*
1209 1.110 msaitoh 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+) cage
1210 1.110 msaitoh 	 * and a module inserted, phy.id is not an MII PHY ID but an SFF-8024 ID.
1211 1.110 msaitoh */
1212 1.110 msaitoh if (hw->phy.media_type == ixgbe_media_type_copper) {
1213 1.95 msaitoh uint16_t id1, id2;
1214 1.95 msaitoh int oui, model, rev;
1215 1.285 pgoyette char descr[MII_MAX_DESCR_LEN];
1216 1.95 msaitoh
1217 1.95 msaitoh id1 = hw->phy.id >> 16;
1218 1.95 msaitoh id2 = hw->phy.id & 0xffff;
1219 1.95 msaitoh oui = MII_OUI(id1, id2);
1220 1.95 msaitoh model = MII_MODEL(id2);
1221 1.95 msaitoh rev = MII_REV(id2);
1222 1.285 pgoyette mii_get_descr(descr, sizeof(descr), oui, model);
1223 1.285 pgoyette if (descr[0])
1224 1.299 msaitoh aprint_normal_dev(dev, "PHY: %s, rev. %d\n",
1225 1.299 msaitoh descr, rev);
1226 1.95 msaitoh else
1227 1.95 msaitoh aprint_normal_dev(dev,
1228 1.95 msaitoh "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1229 1.95 msaitoh oui, model, rev);
1230 1.95 msaitoh }
1231 1.95 msaitoh
1232 1.173 msaitoh /* Enable EEE power saving */
1233 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
1234 1.173 msaitoh hw->mac.ops.setup_eee(hw,
1235 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
1236 1.173 msaitoh
1237 1.52 msaitoh /* Enable power to the phy. */
1238 1.219 msaitoh if (!unsupported_sfp) {
1239 1.219 msaitoh /* Enable the optics for 82599 SFP+ fiber */
1240 1.219 msaitoh ixgbe_enable_tx_laser(hw);
1241 1.219 msaitoh
1242 1.219 msaitoh /*
1243 1.219 msaitoh 		 * XXX Currently, ixgbe_set_phy_power() supports only copper
1244 1.219 msaitoh 		 * PHYs, so the !unsupported_sfp test is not strictly required.
1245 1.219 msaitoh */
1246 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
1247 1.219 msaitoh }
1248 1.52 msaitoh
1249 1.1 dyoung /* Initialize statistics */
1250 1.333 msaitoh ixgbe_update_stats_counters(sc);
1251 1.1 dyoung
1252 1.98 msaitoh /* Check PCIE slot type/speed/width */
1253 1.333 msaitoh ixgbe_get_slot_info(sc);
1254 1.1 dyoung
1255 1.99 msaitoh /*
1256 1.99 msaitoh * Do time init and sysctl init here, but
1257 1.99 msaitoh * only on the first port of a bypass adapter.
1258 1.99 msaitoh */
1259 1.333 msaitoh ixgbe_bypass_init(sc);
1260 1.99 msaitoh
1261 1.99 msaitoh /* Set an initial dmac value */
1262 1.333 msaitoh sc->dmac = 0;
1263 1.99 msaitoh /* Set initial advertised speeds (if applicable) */
1264 1.333 msaitoh sc->advertise = ixgbe_get_default_advertise(sc);
1265 1.45 msaitoh
1266 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
1267 1.99 msaitoh ixgbe_define_iov_schemas(dev, &error);
1268 1.44 msaitoh
1269 1.44 msaitoh /* Add sysctls */
1270 1.333 msaitoh ixgbe_add_device_sysctls(sc);
1271 1.333 msaitoh ixgbe_add_hw_stats(sc);
1272 1.44 msaitoh
1273 1.99 msaitoh /* For Netmap */
1274 1.333 msaitoh sc->init_locked = ixgbe_init_locked;
1275 1.333 msaitoh sc->stop_locked = ixgbe_stop_locked;
1276 1.99 msaitoh
1277 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
1278 1.333 msaitoh ixgbe_netmap_attach(sc);
1279 1.1 dyoung
1280 1.340 msaitoh /* Print some flags */
1281 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap);
1282 1.99 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf);
1283 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en);
1284 1.99 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf);
1285 1.340 msaitoh if (ixgbe_get_device_caps(hw, &dev_caps) == 0) {
1286 1.340 msaitoh snprintb(buf, sizeof(buf), IXGBE_DEVICE_CAPS_FLAGS, dev_caps);
1287 1.340 msaitoh aprint_verbose_dev(dev, "device cap %s\n", buf);
1288 1.340 msaitoh }
1289 1.44 msaitoh
1290 1.44 msaitoh if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1291 1.333 msaitoh pmf_class_network_register(dev, sc->ifp);
1292 1.44 msaitoh else
1293 1.44 msaitoh aprint_error_dev(dev, "couldn't establish power handler\n");
1294 1.44 msaitoh
1295 1.169 msaitoh /* Init recovery mode timer and state variable */
1296 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1297 1.333 msaitoh sc->recovery_mode = 0;
1298 1.169 msaitoh
1299 1.169 msaitoh /* Set up the timer callout */
1300 1.333 msaitoh callout_init(&sc->recovery_mode_timer,
1301 1.169 msaitoh IXGBE_CALLOUT_FLAGS);
1302 1.235 msaitoh snprintf(wqname, sizeof(wqname), "%s-recovery",
1303 1.235 msaitoh device_xname(dev));
1304 1.333 msaitoh error = workqueue_create(&sc->recovery_mode_timer_wq,
1305 1.333 msaitoh wqname, ixgbe_handle_recovery_mode_timer, sc,
1306 1.233 msaitoh IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1307 1.233 msaitoh if (error) {
1308 1.233 msaitoh aprint_error_dev(dev, "could not create "
1309 1.233 msaitoh "recovery_mode_timer workqueue (%d)\n", error);
1310 1.233 msaitoh goto err_out;
1311 1.233 msaitoh }
1312 1.169 msaitoh
1313 1.169 msaitoh /* Start the task */
1314 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
1315 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
1316 1.169 msaitoh }
1317 1.169 msaitoh
1318 1.1 dyoung INIT_DEBUGOUT("ixgbe_attach: end");
1319 1.333 msaitoh sc->osdep.attached = true;
1320 1.98 msaitoh
1321 1.1 dyoung return;
1322 1.43 msaitoh
1323 1.1 dyoung err_late:
1324 1.333 msaitoh ixgbe_free_queues(sc);
1325 1.1 dyoung err_out:
1326 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1327 1.99 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1328 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1329 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
1330 1.333 msaitoh ixgbe_free_pci_resources(sc);
1331 1.333 msaitoh if (sc->mta != NULL)
1332 1.333 msaitoh free(sc->mta, M_DEVBUF);
1333 1.333 msaitoh mutex_destroy(&(sc)->admin_mtx); /* XXX appropriate order? */
1334 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
1335 1.99 msaitoh
1336 1.1 dyoung return;
1337 1.99 msaitoh } /* ixgbe_attach */
1338 1.1 dyoung
1339 1.99 msaitoh /************************************************************************
1340 1.99 msaitoh * ixgbe_check_wol_support
1341 1.99 msaitoh *
1342 1.99 msaitoh * Checks whether the adapter's ports are capable of
1343 1.99 msaitoh * Wake On LAN by reading the adapter's NVM.
1344 1.1 dyoung *
1345 1.99 msaitoh * Sets each port's hw->wol_enabled value depending
1346 1.99 msaitoh * on the value read here.
1347 1.99 msaitoh ************************************************************************/
1348 1.98 msaitoh static void
1349 1.333 msaitoh ixgbe_check_wol_support(struct ixgbe_softc *sc)
1350 1.98 msaitoh {
1351 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1352 1.186 msaitoh u16 dev_caps = 0;
1353 1.1 dyoung
1354 1.98 msaitoh /* Find out WoL support for port */
1355 1.333 msaitoh sc->wol_support = hw->wol_enabled = 0;
1356 1.98 msaitoh ixgbe_get_device_caps(hw, &dev_caps);
1357 1.98 msaitoh if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1358 1.98 msaitoh ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1359 1.99 msaitoh hw->bus.func == 0))
1360 1.333 msaitoh sc->wol_support = hw->wol_enabled = 1;
1361 1.98 msaitoh
1362 1.98 msaitoh /* Save initial wake up filter configuration */
1363 1.333 msaitoh sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1364 1.98 msaitoh
1365 1.98 msaitoh return;
1366 1.99 msaitoh } /* ixgbe_check_wol_support */
1367 1.98 msaitoh
1368 1.99 msaitoh /************************************************************************
1369 1.99 msaitoh * ixgbe_setup_interface
1370 1.98 msaitoh *
1371 1.99 msaitoh * Setup networking device structure and register an interface.
1372 1.99 msaitoh ************************************************************************/
1373 1.1 dyoung static int
1374 1.333 msaitoh ixgbe_setup_interface(device_t dev, struct ixgbe_softc *sc)
1375 1.1 dyoung {
1376 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
1377 1.98 msaitoh struct ifnet *ifp;
1378 1.1 dyoung
1379 1.98 msaitoh INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1380 1.1 dyoung
1381 1.333 msaitoh ifp = sc->ifp = &ec->ec_if;
1382 1.98 msaitoh strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1383 1.98 msaitoh ifp->if_baudrate = IF_Gbps(10);
1384 1.98 msaitoh ifp->if_init = ixgbe_init;
1385 1.98 msaitoh ifp->if_stop = ixgbe_ifstop;
1386 1.333 msaitoh ifp->if_softc = sc;
1387 1.98 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1388 1.98 msaitoh #ifdef IXGBE_MPSAFE
1389 1.112 ozaki ifp->if_extflags = IFEF_MPSAFE;
1390 1.98 msaitoh #endif
1391 1.98 msaitoh ifp->if_ioctl = ixgbe_ioctl;
1392 1.98 msaitoh #if __FreeBSD_version >= 1100045
1393 1.98 msaitoh /* TSO parameters */
1394 1.98 msaitoh ifp->if_hw_tsomax = 65518;
1395 1.98 msaitoh ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1396 1.98 msaitoh ifp->if_hw_tsomaxsegsize = 2048;
1397 1.98 msaitoh #endif
1398 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1399 1.99 msaitoh #if 0
1400 1.99 msaitoh ixgbe_start_locked = ixgbe_legacy_start_locked;
1401 1.99 msaitoh #endif
1402 1.99 msaitoh } else {
1403 1.99 msaitoh ifp->if_transmit = ixgbe_mq_start;
1404 1.99 msaitoh #if 0
1405 1.99 msaitoh ixgbe_start_locked = ixgbe_mq_start_locked;
1406 1.29 msaitoh #endif
1407 1.99 msaitoh }
1408 1.99 msaitoh ifp->if_start = ixgbe_legacy_start;
1409 1.333 msaitoh IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2);
1410 1.98 msaitoh IFQ_SET_READY(&ifp->if_snd);
1411 1.98 msaitoh
1412 1.284 riastrad if_initialize(ifp);
1413 1.333 msaitoh sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if);
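	/* sc->ipq is a per-CPU input queue for passing received packets up the stack. */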
1414 1.333 msaitoh ether_ifattach(ifp, sc->hw.mac.addr);
1415 1.216 msaitoh aprint_normal_dev(dev, "Ethernet address %s\n",
1416 1.333 msaitoh ether_sprintf(sc->hw.mac.addr));
1417 1.98 msaitoh /*
1418 1.98 msaitoh 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
1419 1.98 msaitoh 	 * used.
1420 1.98 msaitoh */
1421 1.98 msaitoh ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1422 1.98 msaitoh
1423 1.333 msaitoh sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1424 1.98 msaitoh
1425 1.98 msaitoh /*
1426 1.98 msaitoh * Tell the upper layer(s) we support long frames.
1427 1.98 msaitoh */
1428 1.98 msaitoh ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1429 1.98 msaitoh
1430 1.98 msaitoh /* Set capability flags */
1431 1.98 msaitoh ifp->if_capabilities |= IFCAP_RXCSUM
1432 1.186 msaitoh | IFCAP_TXCSUM
1433 1.186 msaitoh | IFCAP_TSOv4
1434 1.186 msaitoh | IFCAP_TSOv6;
1435 1.98 msaitoh ifp->if_capenable = 0;
1436 1.98 msaitoh
1437 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1438 1.186 msaitoh | ETHERCAP_VLAN_HWCSUM
1439 1.186 msaitoh | ETHERCAP_JUMBO_MTU
1440 1.186 msaitoh | ETHERCAP_VLAN_MTU;
1441 1.98 msaitoh
1442 1.98 msaitoh /* Enable the above capabilities by default */
1443 1.98 msaitoh ec->ec_capenable = ec->ec_capabilities;
1444 1.98 msaitoh
1445 1.98 msaitoh /*
1446 1.99 msaitoh 	 * Don't turn this on by default: if VLANs are
1447 1.99 msaitoh 	 * created on another pseudo device (e.g. lagg),
1448 1.99 msaitoh 	 * VLAN events are not passed through and operation
1449 1.99 msaitoh 	 * breaks, but it works with HW FILTER off. If you
1450 1.99 msaitoh 	 * use VLANs directly on the ixgbe driver, you can
1451 1.99 msaitoh 	 * enable this to get full hardware tag filtering.
1452 1.99 msaitoh */
1453 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
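	/*
	 * Note that HWFILTER is only advertised here; ec_capenable was
	 * copied from ec_capabilities above, so it remains off by default.
	 */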
1454 1.1 dyoung
1455 1.98 msaitoh /*
1456 1.98 msaitoh * Specify the media types supported by this adapter and register
1457 1.98 msaitoh * callbacks to update media and link information
1458 1.98 msaitoh */
1459 1.333 msaitoh ec->ec_ifmedia = &sc->media;
1460 1.333 msaitoh ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixgbe_media_change,
1461 1.333 msaitoh ixgbe_media_status, &sc->core_mtx);
1462 1.45 msaitoh
1463 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1464 1.333 msaitoh ixgbe_add_media_types(sc);
1465 1.49 msaitoh
1466 1.98 msaitoh /* Set autoselect media by default */
1467 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1468 1.1 dyoung
1469 1.156 ozaki if_register(ifp);
1470 1.156 ozaki
1471 1.98 msaitoh return (0);
1472 1.99 msaitoh } /* ixgbe_setup_interface */
1473 1.1 dyoung
1474 1.99 msaitoh /************************************************************************
1475 1.99 msaitoh * ixgbe_add_media_types
1476 1.99 msaitoh ************************************************************************/
1477 1.98 msaitoh static void
1478 1.333 msaitoh ixgbe_add_media_types(struct ixgbe_softc *sc)
1479 1.98 msaitoh {
1480 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1481 1.186 msaitoh u64 layer;
1482 1.1 dyoung
1483 1.333 msaitoh layer = sc->phy_layer;
1484 1.1 dyoung
1485 1.98 msaitoh #define ADD(mm, dd) \
1486 1.333 msaitoh ifmedia_add(&sc->media, IFM_ETHER | (mm), (dd), NULL);
1487 1.1 dyoung
1488 1.140 msaitoh ADD(IFM_NONE, 0);
1489 1.140 msaitoh
1490 1.98 msaitoh /* Media types with matching NetBSD media defines */
1491 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1492 1.98 msaitoh ADD(IFM_10G_T | IFM_FDX, 0);
1493 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1494 1.98 msaitoh ADD(IFM_1000_T | IFM_FDX, 0);
1495 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1496 1.98 msaitoh ADD(IFM_100_TX | IFM_FDX, 0);
1497 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1498 1.99 msaitoh ADD(IFM_10_T | IFM_FDX, 0);
1499 1.26 msaitoh
1500 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1501 1.319 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1502 1.98 msaitoh ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1503 1.1 dyoung
1504 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1505 1.98 msaitoh ADD(IFM_10G_LR | IFM_FDX, 0);
1506 1.319 msaitoh if (hw->phy.multispeed_fiber)
1507 1.98 msaitoh ADD(IFM_1000_LX | IFM_FDX, 0);
1508 1.98 msaitoh }
1509 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1510 1.98 msaitoh ADD(IFM_10G_SR | IFM_FDX, 0);
1511 1.319 msaitoh if (hw->phy.multispeed_fiber)
1512 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1513 1.319 msaitoh } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1514 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0);
1515 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1516 1.98 msaitoh ADD(IFM_10G_CX4 | IFM_FDX, 0);
1517 1.1 dyoung
1518 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1519 1.98 msaitoh ADD(IFM_10G_KR | IFM_FDX, 0);
1520 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1521 1.180 msaitoh ADD(IFM_10G_KX4 | IFM_FDX, 0);
1522 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1523 1.98 msaitoh ADD(IFM_1000_KX | IFM_FDX, 0);
1524 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1525 1.99 msaitoh ADD(IFM_2500_KX | IFM_FDX, 0);
1526 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
1527 1.103 msaitoh ADD(IFM_2500_T | IFM_FDX, 0);
1528 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T)
1529 1.103 msaitoh ADD(IFM_5000_T | IFM_FDX, 0);
1530 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1531 1.208 msaitoh ADD(IFM_1000_BX10 | IFM_FDX, 0);
1532 1.98 msaitoh /* XXX no ifmedia_set? */
1533 1.185 msaitoh
1534 1.98 msaitoh ADD(IFM_AUTO, 0);
1535 1.98 msaitoh
1536 1.98 msaitoh #undef ADD
1537 1.99 msaitoh } /* ixgbe_add_media_types */
1538 1.1 dyoung
1539 1.99 msaitoh /************************************************************************
1540 1.99 msaitoh * ixgbe_is_sfp
1541 1.99 msaitoh ************************************************************************/
1542 1.99 msaitoh static inline bool
1543 1.99 msaitoh ixgbe_is_sfp(struct ixgbe_hw *hw)
1544 1.99 msaitoh {
1545 1.99 msaitoh switch (hw->mac.type) {
1546 1.99 msaitoh case ixgbe_mac_82598EB:
1547 1.99 msaitoh if (hw->phy.type == ixgbe_phy_nl)
1548 1.144 msaitoh return (TRUE);
1549 1.144 msaitoh return (FALSE);
1550 1.99 msaitoh case ixgbe_mac_82599EB:
1551 1.203 msaitoh case ixgbe_mac_X550EM_x:
1552 1.203 msaitoh case ixgbe_mac_X550EM_a:
1553 1.99 msaitoh switch (hw->mac.ops.get_media_type(hw)) {
1554 1.99 msaitoh case ixgbe_media_type_fiber:
1555 1.99 msaitoh case ixgbe_media_type_fiber_qsfp:
1556 1.144 msaitoh return (TRUE);
1557 1.99 msaitoh default:
1558 1.144 msaitoh return (FALSE);
1559 1.99 msaitoh }
1560 1.99 msaitoh default:
1561 1.144 msaitoh return (FALSE);
1562 1.99 msaitoh }
1563 1.99 msaitoh } /* ixgbe_is_sfp */
1564 1.99 msaitoh
1565 1.226 thorpej static void
1566 1.333 msaitoh ixgbe_schedule_admin_tasklet(struct ixgbe_softc *sc)
1567 1.226 thorpej {
1568 1.243 msaitoh
1569 1.333 msaitoh KASSERT(mutex_owned(&sc->admin_mtx));
1570 1.260 knakahar
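	/*
	 * Do nothing while detaching.  admin_pending avoids enqueueing the
	 * same work item twice while a request is already pending.
	 */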
1571 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
1572 1.333 msaitoh if (sc->admin_pending == 0)
1573 1.333 msaitoh workqueue_enqueue(sc->admin_wq,
1574 1.333 msaitoh &sc->admin_wc, NULL);
1575 1.333 msaitoh sc->admin_pending = 1;
1576 1.255 msaitoh }
1577 1.226 thorpej }
1578 1.226 thorpej
1579 1.99 msaitoh /************************************************************************
1580 1.99 msaitoh * ixgbe_config_link
1581 1.99 msaitoh ************************************************************************/
1582 1.98 msaitoh static void
1583 1.333 msaitoh ixgbe_config_link(struct ixgbe_softc *sc)
1584 1.98 msaitoh {
1585 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1586 1.186 msaitoh u32 autoneg, err = 0;
1587 1.233 msaitoh u32 task_requests = 0;
1588 1.186 msaitoh bool sfp, negotiate = false;
1589 1.1 dyoung
1590 1.98 msaitoh sfp = ixgbe_is_sfp(hw);
1591 1.1 dyoung
1592 1.185 msaitoh if (sfp) {
1593 1.99 msaitoh if (hw->phy.multispeed_fiber) {
1594 1.99 msaitoh ixgbe_enable_tx_laser(hw);
1595 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1596 1.99 msaitoh }
1597 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1598 1.260 knakahar
1599 1.333 msaitoh mutex_enter(&sc->admin_mtx);
1600 1.333 msaitoh sc->task_requests |= task_requests;
1601 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
1602 1.333 msaitoh mutex_exit(&sc->admin_mtx);
1603 1.98 msaitoh } else {
1604 1.333 msaitoh struct ifmedia *ifm = &sc->media;
1605 1.143 msaitoh
1606 1.98 msaitoh if (hw->mac.ops.check_link)
1607 1.333 msaitoh err = ixgbe_check_link(hw, &sc->link_speed,
1608 1.333 msaitoh &sc->link_up, FALSE);
1609 1.98 msaitoh if (err)
1610 1.144 msaitoh return;
1611 1.143 msaitoh
1612 1.143 msaitoh /*
1613 1.143 msaitoh 		 * Check whether this is the first call. If so, get the
1614 1.143 msaitoh 		 * value for autonegotiation.
1615 1.143 msaitoh */
1616 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
1617 1.143 msaitoh if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1618 1.143 msaitoh && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1619 1.186 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1620 1.99 msaitoh &negotiate);
1621 1.98 msaitoh if (err)
1622 1.144 msaitoh return;
1623 1.98 msaitoh if (hw->mac.ops.setup_link)
1624 1.186 msaitoh err = hw->mac.ops.setup_link(hw, autoneg,
1625 1.333 msaitoh sc->link_up);
1626 1.98 msaitoh }
1627 1.99 msaitoh } /* ixgbe_config_link */
1628 1.98 msaitoh
1629 1.99 msaitoh /************************************************************************
1630 1.99 msaitoh * ixgbe_update_stats_counters - Update board statistics counters.
1631 1.99 msaitoh ************************************************************************/
1632 1.98 msaitoh static void
1633 1.333 msaitoh ixgbe_update_stats_counters(struct ixgbe_softc *sc)
1634 1.1 dyoung {
1635 1.333 msaitoh struct ifnet *ifp = sc->ifp;
1636 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1637 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1638 1.305 msaitoh u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
1639 1.304 msaitoh u64 total, total_missed_rx = 0;
1640 1.303 msaitoh uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
1641 1.186 msaitoh unsigned int queue_counters;
1642 1.176 msaitoh int i;
1643 1.44 msaitoh
1644 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
1645 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
1646 1.303 msaitoh
1647 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
1648 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
1649 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
1650 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
1651 1.44 msaitoh
1652 1.176 msaitoh /* 16 registers exist */
1653 1.333 msaitoh queue_counters = uimin(__arraycount(stats->qprc), sc->num_queues);
1654 1.176 msaitoh for (i = 0; i < queue_counters; i++) {
1655 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
1656 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
1657 1.329 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1658 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbrc[i],
1659 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) +
1660 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32));
1661 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbtc[i],
1662 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) +
1663 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32));
1664 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
1665 1.329 msaitoh } else {
1666 1.329 msaitoh /* 82598 */
1667 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]);
1668 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBTC(i), qbtc[i]);
1669 1.329 msaitoh }
1670 1.98 msaitoh }
1671 1.151 msaitoh
1672 1.175 msaitoh /* 8 registers exist */
1673 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1674 1.98 msaitoh uint32_t mp;
1675 1.44 msaitoh
1676 1.151 msaitoh /* MPC */
1677 1.98 msaitoh mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1678 1.98 msaitoh /* global total per queue */
1679 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpc[i], mp);
1680 1.98 msaitoh /* running comprehensive total for stats display */
1681 1.98 msaitoh total_missed_rx += mp;
1682 1.44 msaitoh
1683 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1684 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
1685 1.151 msaitoh
1686 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
1687 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
1688 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) {
1689 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1690 1.319 msaitoh IXGBE_PXONRXCNT(i), pxonrxc[i]);
1691 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1692 1.319 msaitoh IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
1693 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1694 1.319 msaitoh IXGBE_PXON2OFFCNT(i), pxon2offc[i]);
1695 1.151 msaitoh } else {
1696 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1697 1.319 msaitoh IXGBE_PXONRXC(i), pxonrxc[i]);
1698 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats,
1699 1.319 msaitoh IXGBE_PXOFFRXC(i), pxoffrxc[i]);
1700 1.151 msaitoh }
1701 1.98 msaitoh }
1702 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
1703 1.44 msaitoh
1704 1.98 msaitoh 	/* The documentation says M[LR]FC are valid only when the link is up at 10Gbps */
1705 1.333 msaitoh if ((sc->link_active == LINK_STATE_UP)
1706 1.333 msaitoh && (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1707 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
1708 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
1709 1.98 msaitoh }
1710 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
1711 1.326 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LINK_DN_CNT, link_dn_cnt);
1712 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
1713 1.44 msaitoh
1714 1.98 msaitoh /* Hardware workaround, gprc counts missed packets */
1715 1.305 msaitoh IXGBE_EVC_ADD(&stats->gprc,
1716 1.305 msaitoh IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
1717 1.44 msaitoh
1718 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
1719 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
1720 1.305 msaitoh total = lxontxc + lxofftxc;
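	/*
	 * "total" (XON + XOFF frames) is used below to discount flow control
	 * frames from the transmit counters; each pause frame is assumed to
	 * be a minimum-sized Ethernet frame (ETHER_MIN_LEN octets).
	 */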
1721 1.44 msaitoh
1722 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1723 1.305 msaitoh IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
1724 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
1725 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1726 1.280 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1727 1.305 msaitoh - total * ETHER_MIN_LEN);
1728 1.305 msaitoh IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
1729 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
1730 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
1731 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
1732 1.98 msaitoh } else {
1733 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
1734 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
1735 1.98 msaitoh /* 82598 only has a counter in the high register */
1736 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
1737 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
1738 1.305 msaitoh - total * ETHER_MIN_LEN);
1739 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
1740 1.98 msaitoh }
1741 1.44 msaitoh
1742 1.98 msaitoh /*
1743 1.98 msaitoh 	 * Workaround: the MPRC hardware counter incorrectly includes
1744 1.98 msaitoh 	 * broadcasts, so for now we subtract those.
1745 1.98 msaitoh */
1746 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
1747 1.305 msaitoh IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
1748 1.305 msaitoh - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
1749 1.305 msaitoh
1750 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
1751 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
1752 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
1753 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
1754 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
1755 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
1756 1.305 msaitoh
1757 1.305 msaitoh IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
1758 1.305 msaitoh IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
1759 1.305 msaitoh IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
1760 1.305 msaitoh
1761 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
1762 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
1763 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
1764 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
1765 1.305 msaitoh
1766 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
1767 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
1768 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
1769 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
1770 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
1771 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
1772 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
1773 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
1774 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
1775 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
1776 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
1777 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
1778 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
1779 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
1780 1.98 msaitoh /* Only read FCOE on 82599 */
1781 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
1782 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
1783 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
1784 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
1785 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
1786 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
1787 1.98 msaitoh }
1788 1.44 msaitoh
1789 1.44 msaitoh /*
1790 1.224 msaitoh * Fill out the OS statistics structure. Only RX errors are required
1791 1.224 msaitoh * here because all TX counters are incremented in the TX path and
1792 1.224 msaitoh * normal RX counters are prepared in ether_input().
1793 1.44 msaitoh */
1794 1.222 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1795 1.222 thorpej if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1796 1.298 msaitoh
1797 1.298 msaitoh /*
1798 1.298 msaitoh * Aggregate following types of errors as RX errors:
1799 1.298 msaitoh * - CRC error count,
1800 1.298 msaitoh * - illegal byte error count,
1801 1.298 msaitoh * - length error count,
1802 1.298 msaitoh * - undersized packets count,
1803 1.298 msaitoh * - fragmented packets count,
1804 1.298 msaitoh * - oversized packets count,
1805 1.298 msaitoh * - jabber count.
1806 1.298 msaitoh */
1807 1.298 msaitoh if_statadd_ref(nsr, if_ierrors,
1808 1.303 msaitoh crcerrs + illerrc + rlec + ruc + rfc + roc + rjc);
1809 1.298 msaitoh
1810 1.222 thorpej IF_STAT_PUTREF(ifp);
1811 1.99 msaitoh } /* ixgbe_update_stats_counters */
1812 1.1 dyoung
1813 1.99 msaitoh /************************************************************************
1814 1.99 msaitoh * ixgbe_add_hw_stats
1815 1.99 msaitoh *
1816 1.99 msaitoh * Add sysctl variables, one per statistic, to the system.
1817 1.99 msaitoh ************************************************************************/
1818 1.98 msaitoh static void
1819 1.333 msaitoh ixgbe_add_hw_stats(struct ixgbe_softc *sc)
1820 1.1 dyoung {
1821 1.333 msaitoh device_t dev = sc->dev;
1822 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
1823 1.333 msaitoh struct sysctllog **log = &sc->sysctllog;
1824 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
1825 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
1826 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
1827 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
1828 1.98 msaitoh const char *xname = device_xname(dev);
1829 1.144 msaitoh int i;
1830 1.1 dyoung
1831 1.98 msaitoh /* Driver Statistics */
1832 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1833 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG");
1834 1.333 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1835 1.98 msaitoh NULL, xname, "m_defrag() failed");
1836 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1837 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG");
1838 1.333 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1839 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL");
1840 1.333 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC,
1841 1.98 msaitoh NULL, xname, "Driver tx dma hard fail other");
1842 1.333 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1843 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN");
1844 1.333 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1845 1.98 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM");
1846 1.333 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC,
1847 1.98 msaitoh NULL, xname, "Watchdog timeouts");
1848 1.333 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC,
1849 1.98 msaitoh NULL, xname, "TSO errors");
1850 1.333 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR,
1851 1.233 msaitoh NULL, xname, "Admin MSI-X IRQ Handled");
1852 1.333 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR,
1853 1.233 msaitoh NULL, xname, "Link event");
1854 1.333 msaitoh evcnt_attach_dynamic(&sc->mod_workev, EVCNT_TYPE_INTR,
1855 1.233 msaitoh NULL, xname, "SFP+ module event");
1856 1.333 msaitoh evcnt_attach_dynamic(&sc->msf_workev, EVCNT_TYPE_INTR,
1857 1.233 msaitoh NULL, xname, "Multispeed event");
1858 1.333 msaitoh evcnt_attach_dynamic(&sc->phy_workev, EVCNT_TYPE_INTR,
1859 1.233 msaitoh NULL, xname, "External PHY event");
1860 1.1 dyoung
1861 1.168 msaitoh 	/* Max number of traffic classes is 8 */
1862 1.168 msaitoh KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1863 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1864 1.333 msaitoh snprintf(sc->tcs[i].evnamebuf,
1865 1.333 msaitoh sizeof(sc->tcs[i].evnamebuf), "%s tc%d", xname, i);
1866 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
1867 1.168 msaitoh evcnt_attach_dynamic(&stats->mpc[i],
1868 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1869 1.168 msaitoh "RX Missed Packet Count");
1870 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
1871 1.168 msaitoh evcnt_attach_dynamic(&stats->rnbc[i],
1872 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1873 1.333 msaitoh sc->tcs[i].evnamebuf,
1874 1.168 msaitoh "Receive No Buffers");
1875 1.168 msaitoh }
1876 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
1877 1.168 msaitoh evcnt_attach_dynamic(&stats->pxontxc[i],
1878 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1879 1.331 msaitoh "Priority XON Transmitted");
1880 1.168 msaitoh evcnt_attach_dynamic(&stats->pxofftxc[i],
1881 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1882 1.331 msaitoh "Priority XOFF Transmitted");
1883 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
1884 1.168 msaitoh evcnt_attach_dynamic(&stats->pxon2offc[i],
1885 1.168 msaitoh EVCNT_TYPE_MISC, NULL,
1886 1.333 msaitoh sc->tcs[i].evnamebuf,
1887 1.331 msaitoh "Priority XON to XOFF");
1888 1.330 msaitoh evcnt_attach_dynamic(&stats->pxonrxc[i],
1889 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1890 1.331 msaitoh "Priority XON Received");
1891 1.330 msaitoh evcnt_attach_dynamic(&stats->pxoffrxc[i],
1892 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf,
1893 1.331 msaitoh "Priority XOFF Received");
1894 1.168 msaitoh }
1895 1.168 msaitoh }
1896 1.168 msaitoh
1897 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
1898 1.135 msaitoh #ifdef LRO
1899 1.135 msaitoh struct lro_ctrl *lro = &rxr->lro;
1900 1.327 msaitoh #endif
1901 1.135 msaitoh
1902 1.333 msaitoh snprintf(sc->queues[i].evnamebuf,
1903 1.333 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i);
1904 1.333 msaitoh snprintf(sc->queues[i].namebuf,
1905 1.333 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i);
1906 1.1 dyoung
1907 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
1908 1.319 msaitoh aprint_error_dev(dev,
1909 1.319 msaitoh "could not create sysctl root\n");
1910 1.98 msaitoh break;
1911 1.98 msaitoh }
1912 1.1 dyoung
1913 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode,
1914 1.98 msaitoh 0, CTLTYPE_NODE,
1915 1.333 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1916 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1917 1.98 msaitoh break;
1918 1.23 msaitoh
1919 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1920 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
1921 1.98 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1922 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler, 0,
1923 1.333 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1924 1.98 msaitoh break;
1925 1.1 dyoung
1926 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1927 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1928 1.98 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1929 1.98 msaitoh ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1930 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1931 1.98 msaitoh break;
1932 1.1 dyoung
1933 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1934 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
1935 1.98 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1936 1.98 msaitoh ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1937 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0)
1938 1.98 msaitoh break;
1939 1.1 dyoung
1940 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1941 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1942 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"),
1943 1.280 msaitoh ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1944 1.154 msaitoh CTL_CREATE, CTL_EOL) != 0)
1945 1.154 msaitoh break;
1946 1.154 msaitoh
1947 1.154 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1948 1.287 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1949 1.287 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"),
1950 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1951 1.287 msaitoh CTL_CREATE, CTL_EOL) != 0)
1952 1.287 msaitoh break;
1953 1.287 msaitoh
1954 1.287 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1955 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1956 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Head"),
1957 1.98 msaitoh ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1958 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1959 1.33 msaitoh break;
1960 1.98 msaitoh
1961 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
1962 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1963 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"),
1964 1.98 msaitoh ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1965 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0)
1966 1.28 msaitoh break;
1967 1.98 msaitoh
1968 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR,
1969 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue");
1970 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq,
1971 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf,
1972 1.327 msaitoh "Handled queue in softint");
1973 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC,
1974 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint");
1975 1.327 msaitoh if (i < __arraycount(stats->qbtc))
1976 1.327 msaitoh evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1977 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1978 1.328 msaitoh "Queue Bytes Transmitted (reg)");
1979 1.327 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1980 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1981 1.328 msaitoh "Queue Packets Transmitted (soft)");
1982 1.327 msaitoh if (i < __arraycount(stats->qptc))
1983 1.280 msaitoh evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1984 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1985 1.328 msaitoh "Queue Packets Transmitted (reg)");
1986 1.327 msaitoh #ifndef IXGBE_LEGACY_TX
1987 1.327 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1988 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1989 1.327 msaitoh "Packets dropped in pcq");
1990 1.327 msaitoh #endif
1991 1.327 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1992 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1993 1.327 msaitoh "TX Queue No Descriptor Available");
1994 1.327 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1995 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "TSO");
1996 1.327 msaitoh
1997 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1998 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
1999 1.328 msaitoh "Queue Bytes Received (soft)");
2000 1.327 msaitoh if (i < __arraycount(stats->qbrc))
2001 1.280 msaitoh evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
2002 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2003 1.328 msaitoh "Queue Bytes Received (reg)");
2004 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2005 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2006 1.328 msaitoh "Queue Packets Received (soft)");
2007 1.327 msaitoh if (i < __arraycount(stats->qprc))
2008 1.327 msaitoh evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
2009 1.333 msaitoh NULL, sc->queues[i].evnamebuf,
2010 1.328 msaitoh "Queue Packets Received (reg)");
2011 1.327 msaitoh if ((i < __arraycount(stats->qprdc)) &&
2012 1.327 msaitoh (hw->mac.type >= ixgbe_mac_82599EB))
2013 1.151 msaitoh evcnt_attach_dynamic(&stats->qprdc[i],
2014 1.151 msaitoh EVCNT_TYPE_MISC, NULL,
2015 1.333 msaitoh sc->queues[i].evnamebuf,
2016 1.328 msaitoh "Queue Packets Received Drop");
2017 1.33 msaitoh
2018 1.290 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
2019 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf");
2020 1.98 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2021 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded");
2022 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2023 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames");
2024 1.98 msaitoh #ifdef LRO
2025 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2026 1.98 msaitoh CTLFLAG_RD, &lro->lro_queued, 0,
2027 1.98 msaitoh "LRO Queued");
2028 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2029 1.98 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0,
2030 1.98 msaitoh "LRO Flushed");
2031 1.98 msaitoh #endif /* LRO */
2032 1.1 dyoung }
2033 1.28 msaitoh
2034 1.99 msaitoh /* MAC stats get their own sub node */
2035 1.98 msaitoh
2036 1.98 msaitoh snprintf(stats->namebuf,
2037 1.98 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname);
2038 1.98 msaitoh
2039 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2040 1.98 msaitoh stats->namebuf, "rx csum offload - IP");
2041 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2042 1.98 msaitoh stats->namebuf, "rx csum offload - L4");
2043 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2044 1.98 msaitoh stats->namebuf, "rx csum offload - IP bad");
2045 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2046 1.98 msaitoh stats->namebuf, "rx csum offload - L4 bad");
2047 1.98 msaitoh evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
2048 1.98 msaitoh stats->namebuf, "Interrupt conditions zero");
2049 1.98 msaitoh evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
2050 1.98 msaitoh stats->namebuf, "Legacy interrupts");
2051 1.99 msaitoh
2052 1.98 msaitoh evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2053 1.98 msaitoh stats->namebuf, "CRC Errors");
2054 1.98 msaitoh evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2055 1.98 msaitoh stats->namebuf, "Illegal Byte Errors");
2056 1.98 msaitoh evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2057 1.98 msaitoh stats->namebuf, "Byte Errors");
2058 1.98 msaitoh evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2059 1.98 msaitoh stats->namebuf, "MAC Short Packets Discarded");
2060 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2061 1.98 msaitoh evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2062 1.98 msaitoh stats->namebuf, "Bad SFD");
2063 1.98 msaitoh evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2064 1.98 msaitoh stats->namebuf, "Total Packets Missed");
2065 1.98 msaitoh evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2066 1.98 msaitoh stats->namebuf, "MAC Local Faults");
2067 1.98 msaitoh evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2068 1.98 msaitoh stats->namebuf, "MAC Remote Faults");
2069 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2070 1.326 msaitoh evcnt_attach_dynamic(&stats->link_dn_cnt, EVCNT_TYPE_MISC,
2071 1.326 msaitoh NULL, stats->namebuf, "Link down event in the MAC");
2072 1.98 msaitoh evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2073 1.98 msaitoh stats->namebuf, "Receive Length Errors");
2074 1.98 msaitoh evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2075 1.98 msaitoh stats->namebuf, "Link XON Transmitted");
2076 1.330 msaitoh evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2077 1.330 msaitoh stats->namebuf, "Link XOFF Transmitted");
2078 1.98 msaitoh evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2079 1.98 msaitoh stats->namebuf, "Link XON Received");
2080 1.98 msaitoh evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2081 1.98 msaitoh stats->namebuf, "Link XOFF Received");
2082 1.98 msaitoh
2083 1.98 msaitoh /* Packet Reception Stats */
2084 1.98 msaitoh evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2085 1.98 msaitoh stats->namebuf, "Total Octets Received");
2086 1.98 msaitoh evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2087 1.98 msaitoh stats->namebuf, "Good Octets Received");
2088 1.98 msaitoh evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2089 1.98 msaitoh stats->namebuf, "Total Packets Received");
2090 1.98 msaitoh evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2091 1.98 msaitoh stats->namebuf, "Good Packets Received");
2092 1.98 msaitoh evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2093 1.98 msaitoh stats->namebuf, "Multicast Packets Received");
2094 1.98 msaitoh evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2095 1.98 msaitoh stats->namebuf, "Broadcast Packets Received");
2096 1.98 msaitoh evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2097 1.98 msaitoh stats->namebuf, "64 byte frames received ");
2098 1.98 msaitoh evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2099 1.98 msaitoh stats->namebuf, "65-127 byte frames received");
2100 1.98 msaitoh evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2101 1.98 msaitoh stats->namebuf, "128-255 byte frames received");
2102 1.98 msaitoh evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2103 1.98 msaitoh stats->namebuf, "256-511 byte frames received");
2104 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2105 1.98 msaitoh stats->namebuf, "512-1023 byte frames received");
2106 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2107 1.98 msaitoh 	    stats->namebuf, "1024-1522 byte frames received");
2108 1.98 msaitoh evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2109 1.98 msaitoh stats->namebuf, "Receive Undersized");
2110 1.98 msaitoh evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2111 1.98 msaitoh stats->namebuf, "Fragmented Packets Received ");
2112 1.98 msaitoh evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2113 1.98 msaitoh stats->namebuf, "Oversized Packets Received");
2114 1.98 msaitoh evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2115 1.98 msaitoh stats->namebuf, "Received Jabber");
2116 1.98 msaitoh evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2117 1.98 msaitoh stats->namebuf, "Management Packets Received");
2118 1.98 msaitoh evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2119 1.98 msaitoh stats->namebuf, "Management Packets Dropped");
2120 1.98 msaitoh evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2121 1.98 msaitoh stats->namebuf, "Checksum Errors");
2122 1.1 dyoung
2123 1.98 msaitoh /* Packet Transmission Stats */
2124 1.98 msaitoh evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2125 1.98 msaitoh stats->namebuf, "Good Octets Transmitted");
2126 1.98 msaitoh evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2127 1.98 msaitoh stats->namebuf, "Total Packets Transmitted");
2128 1.98 msaitoh evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2129 1.98 msaitoh stats->namebuf, "Good Packets Transmitted");
2130 1.98 msaitoh evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2131 1.98 msaitoh stats->namebuf, "Broadcast Packets Transmitted");
2132 1.98 msaitoh evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2133 1.98 msaitoh stats->namebuf, "Multicast Packets Transmitted");
2134 1.98 msaitoh evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2135 1.98 msaitoh stats->namebuf, "Management Packets Transmitted");
2136 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2137 1.98 msaitoh stats->namebuf, "64 byte frames transmitted ");
2138 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2139 1.98 msaitoh stats->namebuf, "65-127 byte frames transmitted");
2140 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2141 1.98 msaitoh stats->namebuf, "128-255 byte frames transmitted");
2142 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2143 1.98 msaitoh stats->namebuf, "256-511 byte frames transmitted");
2144 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2145 1.98 msaitoh stats->namebuf, "512-1023 byte frames transmitted");
2146 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2147 1.98 msaitoh stats->namebuf, "1024-1522 byte frames transmitted");
2148 1.99 msaitoh } /* ixgbe_add_hw_stats */
2149 1.48 msaitoh
2150 1.1 dyoung static void
2151 1.333 msaitoh ixgbe_clear_evcnt(struct ixgbe_softc *sc)
2152 1.1 dyoung {
2153 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
2154 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
2155 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2156 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
2157 1.168 msaitoh int i;
2158 1.98 msaitoh
2159 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0);
2160 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0);
2161 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0);
2162 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0);
2163 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0);
2164 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0);
2165 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0);
2166 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0);
2167 1.333 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0);
2168 1.333 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0);
2169 1.333 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0);
2170 1.333 msaitoh IXGBE_EVC_STORE(&sc->mod_workev, 0);
2171 1.333 msaitoh IXGBE_EVC_STORE(&sc->msf_workev, 0);
2172 1.333 msaitoh IXGBE_EVC_STORE(&sc->phy_workev, 0);
2173 1.98 msaitoh
2174 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2175 1.168 msaitoh if (i < __arraycount(stats->mpc)) {
2176 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpc[i], 0);
2177 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
2178 1.305 msaitoh IXGBE_EVC_STORE(&stats->rnbc[i], 0);
2179 1.168 msaitoh }
2180 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) {
2181 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
2182 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
2183 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
2184 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
2185 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2186 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
2187 1.168 msaitoh }
2188 1.168 msaitoh }
2189 1.168 msaitoh
2190 1.333 msaitoh txr = sc->tx_rings;
2191 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
2192 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0);
2193 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0);
2194 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0);
2195 1.305 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0);
2196 1.98 msaitoh #ifndef IXGBE_LEGACY_TX
2197 1.305 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0);
2198 1.45 msaitoh #endif
2199 1.327 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
2200 1.327 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0);
2201 1.134 msaitoh txr->q_efbig_tx_dma_setup = 0;
2202 1.134 msaitoh txr->q_mbuf_defrag_failed = 0;
2203 1.134 msaitoh txr->q_efbig2_tx_dma_setup = 0;
2204 1.134 msaitoh txr->q_einval_tx_dma_setup = 0;
2205 1.134 msaitoh txr->q_other_tx_dma_setup = 0;
2206 1.134 msaitoh txr->q_eagain_tx_dma_setup = 0;
2207 1.134 msaitoh txr->q_enomem_tx_dma_setup = 0;
2208 1.134 msaitoh txr->q_tso_err = 0;
2209 1.1 dyoung
2210 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
2211 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprc[i], 0);
2212 1.305 msaitoh IXGBE_EVC_STORE(&stats->qptc[i], 0);
2213 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbrc[i], 0);
2214 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbtc[i], 0);
2215 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
2216 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprdc[i], 0);
2217 1.98 msaitoh }
2218 1.98 msaitoh
2219 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0);
2220 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
2221 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0);
2222 1.305 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
2223 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
2224 1.305 msaitoh }
2225 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0);
2226 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0);
2227 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
2228 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
2229 1.305 msaitoh IXGBE_EVC_STORE(&stats->intzero, 0);
2230 1.305 msaitoh IXGBE_EVC_STORE(&stats->legint, 0);
2231 1.305 msaitoh IXGBE_EVC_STORE(&stats->crcerrs, 0);
2232 1.305 msaitoh IXGBE_EVC_STORE(&stats->illerrc, 0);
2233 1.305 msaitoh IXGBE_EVC_STORE(&stats->errbc, 0);
2234 1.305 msaitoh IXGBE_EVC_STORE(&stats->mspdc, 0);
2235 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
2236 1.305 msaitoh IXGBE_EVC_STORE(&stats->mbsdc, 0);
2237 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpctotal, 0);
2238 1.305 msaitoh IXGBE_EVC_STORE(&stats->mlfc, 0);
2239 1.305 msaitoh IXGBE_EVC_STORE(&stats->mrfc, 0);
2240 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
2241 1.326 msaitoh IXGBE_EVC_STORE(&stats->link_dn_cnt, 0);
2242 1.305 msaitoh IXGBE_EVC_STORE(&stats->rlec, 0);
2243 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxontxc, 0);
2244 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxonrxc, 0);
2245 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxofftxc, 0);
2246 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxoffrxc, 0);
2247 1.98 msaitoh
2248 1.98 msaitoh /* Packet Reception Stats */
2249 1.305 msaitoh IXGBE_EVC_STORE(&stats->tor, 0);
2250 1.305 msaitoh IXGBE_EVC_STORE(&stats->gorc, 0);
2251 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpr, 0);
2252 1.305 msaitoh IXGBE_EVC_STORE(&stats->gprc, 0);
2253 1.305 msaitoh IXGBE_EVC_STORE(&stats->mprc, 0);
2254 1.305 msaitoh IXGBE_EVC_STORE(&stats->bprc, 0);
2255 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc64, 0);
2256 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc127, 0);
2257 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc255, 0);
2258 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc511, 0);
2259 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1023, 0);
2260 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1522, 0);
2261 1.305 msaitoh IXGBE_EVC_STORE(&stats->ruc, 0);
2262 1.305 msaitoh IXGBE_EVC_STORE(&stats->rfc, 0);
2263 1.305 msaitoh IXGBE_EVC_STORE(&stats->roc, 0);
2264 1.305 msaitoh IXGBE_EVC_STORE(&stats->rjc, 0);
2265 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngprc, 0);
2266 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngpdc, 0);
2267 1.305 msaitoh IXGBE_EVC_STORE(&stats->xec, 0);
2268 1.98 msaitoh
2269 1.98 msaitoh /* Packet Transmission Stats */
2270 1.305 msaitoh IXGBE_EVC_STORE(&stats->gotc, 0);
2271 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpt, 0);
2272 1.305 msaitoh IXGBE_EVC_STORE(&stats->gptc, 0);
2273 1.305 msaitoh IXGBE_EVC_STORE(&stats->bptc, 0);
2274 1.305 msaitoh IXGBE_EVC_STORE(&stats->mptc, 0);
2275 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngptc, 0);
2276 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc64, 0);
2277 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc127, 0);
2278 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc255, 0);
2279 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc511, 0);
2280 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1023, 0);
2281 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1522, 0);
2282 1.98 msaitoh }
2283 1.98 msaitoh
2284 1.99 msaitoh /************************************************************************
2285 1.99 msaitoh * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2286 1.99 msaitoh *
2287 1.99 msaitoh * Retrieves the TDH value from the hardware
2288 1.99 msaitoh ************************************************************************/
2289 1.185 msaitoh static int
2290 1.98 msaitoh ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2291 1.98 msaitoh {
2292 1.98 msaitoh struct sysctlnode node = *rnode;
2293 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2294 1.333 msaitoh struct ixgbe_softc *sc;
2295 1.98 msaitoh uint32_t val;
2296 1.98 msaitoh
2297 1.99 msaitoh if (!txr)
2298 1.99 msaitoh return (0);
2299 1.99 msaitoh
2300 1.333 msaitoh sc = txr->sc;
2301 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2302 1.169 msaitoh return (EPERM);
2303 1.169 msaitoh
2304 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->me));
2305 1.98 msaitoh node.sysctl_data = &val;
2306 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2307 1.99 msaitoh } /* ixgbe_sysctl_tdh_handler */
2308 1.98 msaitoh
2309 1.99 msaitoh /************************************************************************
2310 1.99 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2311 1.99 msaitoh *
2312 1.99 msaitoh * Retrieves the TDT value from the hardware
2313 1.99 msaitoh ************************************************************************/
2314 1.185 msaitoh static int
2315 1.98 msaitoh ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2316 1.98 msaitoh {
2317 1.98 msaitoh struct sysctlnode node = *rnode;
2318 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2319 1.333 msaitoh struct ixgbe_softc *sc;
2320 1.98 msaitoh uint32_t val;
2321 1.1 dyoung
2322 1.99 msaitoh if (!txr)
2323 1.99 msaitoh return (0);
2324 1.99 msaitoh
2325 1.333 msaitoh sc = txr->sc;
2326 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2327 1.169 msaitoh return (EPERM);
2328 1.169 msaitoh
2329 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->me));
2330 1.98 msaitoh node.sysctl_data = &val;
2331 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2332 1.99 msaitoh } /* ixgbe_sysctl_tdt_handler */
2333 1.45 msaitoh
2334 1.99 msaitoh /************************************************************************
2335 1.154 msaitoh * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2336 1.154 msaitoh * handler function
2337 1.154 msaitoh *
2338 1.154 msaitoh * Retrieves the next_to_check value
2339 1.154 msaitoh ************************************************************************/
2340 1.185 msaitoh static int
2341 1.154 msaitoh ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2342 1.154 msaitoh {
2343 1.154 msaitoh struct sysctlnode node = *rnode;
2344 1.154 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2345 1.333 msaitoh struct ixgbe_softc *sc;
2346 1.154 msaitoh uint32_t val;
2347 1.154 msaitoh
2348 1.154 msaitoh if (!rxr)
2349 1.154 msaitoh return (0);
2350 1.154 msaitoh
2351 1.333 msaitoh sc = rxr->sc;
2352 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2353 1.169 msaitoh return (EPERM);
2354 1.169 msaitoh
2355 1.154 msaitoh val = rxr->next_to_check;
2356 1.154 msaitoh node.sysctl_data = &val;
2357 1.154 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2358 1.154 msaitoh } /* ixgbe_sysctl_next_to_check_handler */
2359 1.154 msaitoh
2360 1.154 msaitoh /************************************************************************
2361 1.287 msaitoh  * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2362 1.287 msaitoh  *				     handler function
2363 1.287 msaitoh *
2364 1.287 msaitoh * Retrieves the next_to_refresh value
2365 1.287 msaitoh ************************************************************************/
2366 1.287 msaitoh static int
2367 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2368 1.287 msaitoh {
2369 1.287 msaitoh struct sysctlnode node = *rnode;
2370 1.287 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2371 1.333 msaitoh struct ixgbe_softc *sc;
2372 1.287 msaitoh uint32_t val;
2373 1.287 msaitoh
2374 1.287 msaitoh if (!rxr)
2375 1.287 msaitoh return (0);
2376 1.287 msaitoh
2377 1.333 msaitoh sc = rxr->sc;
2378 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2379 1.287 msaitoh return (EPERM);
2380 1.287 msaitoh
2381 1.287 msaitoh val = rxr->next_to_refresh;
2382 1.287 msaitoh node.sysctl_data = &val;
2383 1.287 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2384 1.287 msaitoh } /* ixgbe_sysctl_next_to_refresh_handler */
2385 1.287 msaitoh
2386 1.287 msaitoh /************************************************************************
2387 1.99 msaitoh * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2388 1.99 msaitoh *
2389 1.99 msaitoh * Retrieves the RDH value from the hardware
2390 1.99 msaitoh ************************************************************************/
2391 1.185 msaitoh static int
2392 1.98 msaitoh ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2393 1.98 msaitoh {
2394 1.98 msaitoh struct sysctlnode node = *rnode;
2395 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2396 1.333 msaitoh struct ixgbe_softc *sc;
2397 1.98 msaitoh uint32_t val;
2398 1.1 dyoung
2399 1.99 msaitoh if (!rxr)
2400 1.99 msaitoh return (0);
2401 1.99 msaitoh
2402 1.333 msaitoh sc = rxr->sc;
2403 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2404 1.169 msaitoh return (EPERM);
2405 1.169 msaitoh
2406 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDH(rxr->me));
2407 1.98 msaitoh node.sysctl_data = &val;
2408 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2409 1.99 msaitoh } /* ixgbe_sysctl_rdh_handler */
2410 1.1 dyoung
2411 1.99 msaitoh /************************************************************************
2412 1.99 msaitoh * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2413 1.99 msaitoh *
2414 1.99 msaitoh * Retrieves the RDT value from the hardware
2415 1.99 msaitoh ************************************************************************/
2416 1.185 msaitoh static int
2417 1.98 msaitoh ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2418 1.98 msaitoh {
2419 1.98 msaitoh struct sysctlnode node = *rnode;
2420 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2421 1.333 msaitoh struct ixgbe_softc *sc;
2422 1.98 msaitoh uint32_t val;
2423 1.1 dyoung
2424 1.99 msaitoh if (!rxr)
2425 1.99 msaitoh return (0);
2426 1.99 msaitoh
2427 1.333 msaitoh sc = rxr->sc;
2428 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
2429 1.169 msaitoh return (EPERM);
2430 1.169 msaitoh
2431 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDT(rxr->me));
2432 1.98 msaitoh node.sysctl_data = &val;
2433 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node));
2434 1.99 msaitoh } /* ixgbe_sysctl_rdt_handler */
2435 1.1 dyoung
2436 1.193 msaitoh static int
2437 1.193 msaitoh ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2438 1.193 msaitoh {
2439 1.193 msaitoh struct ifnet *ifp = &ec->ec_if;
2440 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2441 1.193 msaitoh int rv;
2442 1.193 msaitoh
2443 1.193 msaitoh if (set)
2444 1.333 msaitoh rv = ixgbe_register_vlan(sc, vid);
2445 1.193 msaitoh else
2446 1.333 msaitoh rv = ixgbe_unregister_vlan(sc, vid);
2447 1.193 msaitoh
2448 1.200 msaitoh if (rv != 0)
2449 1.200 msaitoh return rv;
2450 1.200 msaitoh
2451 1.200 msaitoh /*
2452 1.200 msaitoh * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2453 1.200 msaitoh * or 0 to 1.
2454 1.200 msaitoh */
2455 1.200 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2456 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2457 1.200 msaitoh
2458 1.193 msaitoh return rv;
2459 1.193 msaitoh }
2460 1.193 msaitoh
2461 1.99 msaitoh /************************************************************************
2462 1.99 msaitoh * ixgbe_register_vlan
2463 1.99 msaitoh *
2464 1.99 msaitoh  * Run via the vlan config EVENT, this enables us to use the
2465 1.99 msaitoh  * HW Filter table since we can get the vlan id. This
2466 1.99 msaitoh  * just creates the entry in the soft version of the
2467 1.99 msaitoh  * VFTA; init will repopulate the real table.
2468 1.99 msaitoh ************************************************************************/
2469 1.193 msaitoh static int
2470 1.333 msaitoh ixgbe_register_vlan(struct ixgbe_softc *sc, u16 vtag)
2471 1.98 msaitoh {
2472 1.98 msaitoh u16 index, bit;
2473 1.193 msaitoh int error;
2474 1.48 msaitoh
2475 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2476 1.193 msaitoh return EINVAL;
2477 1.1 dyoung
2478 1.333 msaitoh IXGBE_CORE_LOCK(sc);
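	/*
	 * The shadow VFTA is an array of 32-bit words: bits 11:5 of the
	 * VLAN ID select the word and bits 4:0 select the bit within it.
	 */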
2479 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2480 1.98 msaitoh bit = vtag & 0x1F;
2481 1.333 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit);
2482 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, true,
2483 1.193 msaitoh true);
2484 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2485 1.193 msaitoh if (error != 0)
2486 1.193 msaitoh error = EACCES;
2487 1.193 msaitoh
2488 1.193 msaitoh return error;
2489 1.99 msaitoh } /* ixgbe_register_vlan */
2490 1.1 dyoung
2491 1.99 msaitoh /************************************************************************
2492 1.99 msaitoh * ixgbe_unregister_vlan
2493 1.99 msaitoh *
2494 1.99 msaitoh * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2495 1.99 msaitoh ************************************************************************/
2496 1.193 msaitoh static int
2497 1.333 msaitoh ixgbe_unregister_vlan(struct ixgbe_softc *sc, u16 vtag)
2498 1.98 msaitoh {
2499 1.98 msaitoh u16 index, bit;
2500 1.193 msaitoh int error;
2501 1.1 dyoung
2502 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2503 1.193 msaitoh return EINVAL;
2504 1.1 dyoung
2505 1.333 msaitoh IXGBE_CORE_LOCK(sc);
2506 1.98 msaitoh index = (vtag >> 5) & 0x7F;
2507 1.98 msaitoh bit = vtag & 0x1F;
2508 1.333 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit);
2509 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, false,
2510 1.193 msaitoh true);
2511 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
2512 1.193 msaitoh if (error != 0)
2513 1.193 msaitoh error = EACCES;
2514 1.193 msaitoh
2515 1.193 msaitoh return error;
2516 1.99 msaitoh } /* ixgbe_unregister_vlan */
2517 1.98 msaitoh
2518 1.98 msaitoh static void
2519 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *sc)
2520 1.98 msaitoh {
2521 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2522 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2523 1.98 msaitoh struct rx_ring *rxr;
2524 1.200 msaitoh u32 ctrl;
2525 1.186 msaitoh int i;
2526 1.177 msaitoh bool hwtagging;
2527 1.98 msaitoh
2528 1.178 msaitoh /* Enable HW tagging only if any vlan is attached */
2529 1.177 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2530 1.178 msaitoh && VLAN_ATTACHED(ec);
2531 1.1 dyoung
2532 1.98 msaitoh /* Setup the queues for vlans */
2533 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
2534 1.333 msaitoh rxr = &sc->rx_rings[i];
2535 1.178 msaitoh /*
2536 1.178 msaitoh 		 * On 82599 and later, the VLAN enable is per-queue in RXDCTL.
2537 1.178 msaitoh */
2538 1.177 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
2539 1.177 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2540 1.177 msaitoh if (hwtagging)
2541 1.115 msaitoh ctrl |= IXGBE_RXDCTL_VME;
2542 1.177 msaitoh else
2543 1.177 msaitoh ctrl &= ~IXGBE_RXDCTL_VME;
2544 1.177 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2545 1.98 msaitoh }
2546 1.177 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2547 1.1 dyoung }
2548 1.1 dyoung
2549 1.200 msaitoh /* VLAN hw tagging for 82598 */
2550 1.200 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
2551 1.200 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2552 1.200 msaitoh if (hwtagging)
2553 1.200 msaitoh ctrl |= IXGBE_VLNCTRL_VME;
2554 1.200 msaitoh else
2555 1.200 msaitoh ctrl &= ~IXGBE_VLNCTRL_VME;
2556 1.200 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2557 1.200 msaitoh }
2558 1.200 msaitoh } /* ixgbe_setup_vlan_hw_tagging */
2559 1.200 msaitoh
2560 1.200 msaitoh static void
2561 1.333 msaitoh ixgbe_setup_vlan_hw_support(struct ixgbe_softc *sc)
2562 1.200 msaitoh {
2563 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
2564 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2565 1.200 msaitoh int i;
2566 1.200 msaitoh u32 ctrl;
2567 1.200 msaitoh struct vlanid_list *vlanidp;
2568 1.200 msaitoh
2569 1.200 msaitoh /*
2570 1.294 skrll * This function is called from both if_init and ifflags_cb()
2571 1.200 msaitoh * on NetBSD.
2572 1.200 msaitoh */
2573 1.200 msaitoh
2574 1.200 msaitoh /*
2575 1.200 msaitoh * Part 1:
2576 1.200 msaitoh * Setup VLAN HW tagging
2577 1.200 msaitoh */
2578 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc);
2579 1.200 msaitoh
2580 1.200 msaitoh /*
2581 1.200 msaitoh * Part 2:
2582 1.200 msaitoh * Setup VLAN HW filter
2583 1.200 msaitoh */
2584 1.193 msaitoh /* Cleanup shadow_vfta */
2585 1.193 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2586 1.333 msaitoh sc->shadow_vfta[i] = 0;
2587 1.193 msaitoh /* Generate shadow_vfta from ec_vids */
2588 1.201 msaitoh ETHER_LOCK(ec);
2589 1.193 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2590 1.193 msaitoh uint32_t idx;
2591 1.193 msaitoh
2592 1.193 msaitoh idx = vlanidp->vid / 32;
2593 1.193 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE);
2594 1.333 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2595 1.193 msaitoh }
2596 1.201 msaitoh ETHER_UNLOCK(ec);
2597 1.99 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2598 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), sc->shadow_vfta[i]);
2599 1.22 msaitoh
2600 1.98 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2601 1.98 msaitoh /* Enable the Filter Table if enabled */
2602 1.177 msaitoh if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2603 1.98 msaitoh ctrl |= IXGBE_VLNCTRL_VFE;
2604 1.177 msaitoh else
2605 1.177 msaitoh ctrl &= ~IXGBE_VLNCTRL_VFE;
2606 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2607 1.99 msaitoh } /* ixgbe_setup_vlan_hw_support */
2608 1.1 dyoung
2609 1.99 msaitoh /************************************************************************
2610 1.99 msaitoh * ixgbe_get_slot_info
2611 1.99 msaitoh *
2612 1.99 msaitoh * Get the width and transaction speed of
2613 1.99 msaitoh * the slot this adapter is plugged into.
2614 1.99 msaitoh ************************************************************************/
2615 1.98 msaitoh static void
2616 1.333 msaitoh ixgbe_get_slot_info(struct ixgbe_softc *sc)
2617 1.98 msaitoh {
2618 1.333 msaitoh device_t dev = sc->dev;
2619 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2620 1.186 msaitoh u32 offset;
2621 1.98 msaitoh u16 link;
2622 1.186 msaitoh int bus_info_valid = TRUE;
2623 1.99 msaitoh
2624 1.99 msaitoh /* Some devices are behind an internal bridge */
2625 1.99 msaitoh switch (hw->device_id) {
2626 1.99 msaitoh case IXGBE_DEV_ID_82599_SFP_SF_QP:
2627 1.99 msaitoh case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2628 1.99 msaitoh goto get_parent_info;
2629 1.99 msaitoh default:
2630 1.99 msaitoh break;
2631 1.99 msaitoh }
2632 1.1 dyoung
2633 1.99 msaitoh ixgbe_get_bus_info(hw);
2634 1.99 msaitoh
2635 1.99 msaitoh /*
2636 1.99 msaitoh * Some devices don't use PCI-E, but there is no need
2637 1.99 msaitoh * to display "Unknown" for bus speed and width.
2638 1.99 msaitoh */
2639 1.99 msaitoh switch (hw->mac.type) {
2640 1.99 msaitoh case ixgbe_mac_X550EM_x:
2641 1.99 msaitoh case ixgbe_mac_X550EM_a:
2642 1.99 msaitoh return;
2643 1.99 msaitoh default:
2644 1.99 msaitoh goto display;
2645 1.1 dyoung }
2646 1.1 dyoung
2647 1.99 msaitoh get_parent_info:
2648 1.98 msaitoh /*
2649 1.99 msaitoh * For the Quad port adapter we need to parse back
2650 1.99 msaitoh * up the PCI tree to find the speed of the expansion
2651 1.99 msaitoh * slot into which this adapter is plugged. A bit more work.
2652 1.99 msaitoh */
2653 1.98 msaitoh dev = device_parent(device_parent(dev));
2654 1.99 msaitoh #if 0
2655 1.98 msaitoh #ifdef IXGBE_DEBUG
2656 1.99 msaitoh device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2657 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2658 1.98 msaitoh #endif
2659 1.98 msaitoh dev = device_parent(device_parent(dev));
2660 1.98 msaitoh #ifdef IXGBE_DEBUG
2661 1.99 msaitoh device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2662 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev));
2663 1.99 msaitoh #endif
2664 1.1 dyoung #endif
2665 1.98 msaitoh /* Now get the PCI Express Capabilities offset */
2666 1.333 msaitoh if (pci_get_capability(sc->osdep.pc, sc->osdep.tag,
2667 1.99 msaitoh PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2668 1.99 msaitoh /*
2669 1.99 msaitoh * Hmm...can't get PCI-Express capabilities.
2670 1.99 msaitoh * Falling back to default method.
2671 1.99 msaitoh */
2672 1.99 msaitoh bus_info_valid = FALSE;
2673 1.99 msaitoh ixgbe_get_bus_info(hw);
2674 1.99 msaitoh goto display;
2675 1.99 msaitoh }
2676 1.98 msaitoh /* ...and read the Link Status Register */
2677 1.333 msaitoh link = pci_conf_read(sc->osdep.pc, sc->osdep.tag,
2678 1.120 msaitoh offset + PCIE_LCSR) >> 16;
2679 1.120 msaitoh ixgbe_set_pci_config_data_generic(hw, link);
2680 1.52 msaitoh
2681 1.98 msaitoh display:
2682 1.99 msaitoh device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2683 1.186 msaitoh ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2684 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2685 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2686 1.99 msaitoh "Unknown"),
2687 1.99 msaitoh ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2688 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2689 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2690 1.99 msaitoh "Unknown"));
2691 1.99 msaitoh
2692 1.99 msaitoh if (bus_info_valid) {
2693 1.99 msaitoh if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2694 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2695 1.99 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500))) {
2696 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2697 1.99 msaitoh " for this card\n is not sufficient for"
2698 1.99 msaitoh " optimal performance.\n");
2699 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2700 1.99 msaitoh "PCIE, or x4 PCIE Gen2 slot is required.\n");
2701 1.99 msaitoh }
2702 1.99 msaitoh if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2703 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2704 1.99 msaitoh (hw->bus.speed < ixgbe_bus_speed_8000))) {
2705 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available"
2706 1.99 msaitoh " for this card\n is not sufficient for"
2707 1.99 msaitoh " optimal performance.\n");
2708 1.99 msaitoh device_printf(dev, "For optimal performance a x8 "
2709 1.99 msaitoh "PCIE Gen3 slot is required.\n");
2710 1.99 msaitoh }
2711 1.99 msaitoh } else
2712 1.319 msaitoh device_printf(dev,
2713 1.319 msaitoh "Unable to determine slot speed/width. The speed/width "
2714 1.319 msaitoh 		    "reported are those of the internal switch.\n");
2715 1.45 msaitoh
2716 1.45 msaitoh return;
2717 1.99 msaitoh } /* ixgbe_get_slot_info */
2718 1.1 dyoung
2719 1.99 msaitoh /************************************************************************
2720 1.321 msaitoh * ixgbe_enable_queue - Queue Interrupt Enabler
2721 1.99 msaitoh ************************************************************************/
2722 1.1 dyoung static inline void
2723 1.333 msaitoh ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
2724 1.1 dyoung {
2725 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2726 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2727 1.197 msaitoh u64 queue = 1ULL << vector;
2728 1.186 msaitoh u32 mask;
2729 1.1 dyoung
2730 1.139 knakahar mutex_enter(&que->dc_mtx);
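	/*
	 * disabled_count counts nested ixgbe_disable_queue() calls, so
	 * only re-enable the queue interrupt when the outermost disable
	 * has been released (i.e. the count drops back to zero).
	 */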
2731 1.139 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0)
2732 1.127 knakahar goto out;
2733 1.127 knakahar
2734 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2735 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2736 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2737 1.1 dyoung } else {
2738 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2739 1.98 msaitoh if (mask)
2740 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2741 1.98 msaitoh mask = (queue >> 32);
2742 1.98 msaitoh if (mask)
2743 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2744 1.1 dyoung }
2745 1.127 knakahar out:
2746 1.139 knakahar mutex_exit(&que->dc_mtx);
2747 1.99 msaitoh } /* ixgbe_enable_queue */
2748 1.1 dyoung
2749 1.99 msaitoh /************************************************************************
2750 1.139 knakahar * ixgbe_disable_queue_internal
2751 1.99 msaitoh ************************************************************************/
2752 1.82 msaitoh static inline void
2753 1.333 msaitoh ixgbe_disable_queue_internal(struct ixgbe_softc *sc, u32 vector, bool nestok)
2754 1.1 dyoung {
2755 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2756 1.333 msaitoh struct ix_queue *que = &sc->queues[vector];
2757 1.197 msaitoh u64 queue = 1ULL << vector;
2758 1.186 msaitoh u32 mask;
2759 1.1 dyoung
2760 1.139 knakahar mutex_enter(&que->dc_mtx);
2761 1.139 knakahar
2762 1.139 knakahar if (que->disabled_count > 0) {
2763 1.139 knakahar if (nestok)
2764 1.139 knakahar que->disabled_count++;
2765 1.139 knakahar goto out;
2766 1.139 knakahar }
2767 1.139 knakahar que->disabled_count++;
2768 1.127 knakahar
2769 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) {
2770 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2771 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2772 1.1 dyoung } else {
2773 1.98 msaitoh mask = (queue & 0xFFFFFFFF);
2774 1.98 msaitoh if (mask)
2775 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2776 1.98 msaitoh mask = (queue >> 32);
2777 1.98 msaitoh if (mask)
2778 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2779 1.1 dyoung }
2780 1.127 knakahar out:
2781 1.139 knakahar mutex_exit(&que->dc_mtx);
2782 1.139 knakahar } /* ixgbe_disable_queue_internal */
2783 1.139 knakahar
2784 1.139 knakahar /************************************************************************
2785 1.139 knakahar * ixgbe_disable_queue
2786 1.139 knakahar ************************************************************************/
2787 1.139 knakahar static inline void
2788 1.333 msaitoh ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
2789 1.139 knakahar {
2790 1.139 knakahar
2791 1.333 msaitoh ixgbe_disable_queue_internal(sc, vector, true);
2792 1.99 msaitoh } /* ixgbe_disable_queue */
2793 1.1 dyoung
2794 1.99 msaitoh /************************************************************************
2795 1.133 knakahar * ixgbe_sched_handle_que - schedule deferred packet processing
2796 1.133 knakahar ************************************************************************/
2797 1.133 knakahar static inline void
2798 1.333 msaitoh ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que)
2799 1.133 knakahar {
2800 1.133 knakahar
2801 1.185 msaitoh if (que->txrx_use_workqueue) {
2802 1.133 knakahar /*
2803 1.333 msaitoh 		 * sc->que_wq is bound to each CPU instead of to each
2804 1.133 knakahar		 * NIC queue to reduce the number of workqueue kthreads.
2805 1.133 knakahar		 * Because interrupt affinity has to be considered in
2806 1.133 knakahar		 * this function, the workqueue kthread must be
2807 1.133 knakahar		 * WQ_PERCPU. If a WQ_PERCPU workqueue kthread were
2808 1.133 knakahar		 * created for each NIC queue, the number of created
2809 1.133 knakahar		 * kthreads would be (number of used NIC queues) *
2810 1.133 knakahar		 * (number of CPUs) = (number of CPUs) ^ 2 most often.
2811 1.133 knakahar		 *
2812 1.133 knakahar		 * Re-entry for the same NIC queue is prevented by
2813 1.133 knakahar		 * masking the queue's interrupt, and different NIC
2814 1.133 knakahar		 * queues use different struct work (que->wq_cookie).
2815 1.133 knakahar		 * So an "enqueued" flag to avoid calling
2816 1.133 knakahar		 * workqueue_enqueue() twice is not required.
2817 1.133 knakahar */
2818 1.333 msaitoh workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu());
2819 1.319 msaitoh } else
2820 1.133 knakahar softint_schedule(que->que_si);
2821 1.133 knakahar }
2822 1.133 knakahar
2823 1.133 knakahar /************************************************************************
2824 1.99 msaitoh * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2825 1.99 msaitoh ************************************************************************/
2826 1.34 msaitoh static int
2827 1.1 dyoung ixgbe_msix_que(void *arg)
2828 1.1 dyoung {
2829 1.1 dyoung struct ix_queue *que = arg;
2830 1.339 msaitoh struct ixgbe_softc *sc = que->sc;
2831 1.333 msaitoh struct ifnet *ifp = sc->ifp;
2832 1.1 dyoung struct tx_ring *txr = que->txr;
2833 1.1 dyoung struct rx_ring *rxr = que->rxr;
2834 1.1 dyoung u32 newitr = 0;
2835 1.1 dyoung
2836 1.33 msaitoh /* Protect against spurious interrupts */
2837 1.33 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
2838 1.34 msaitoh return 0;
2839 1.33 msaitoh
2840 1.333 msaitoh ixgbe_disable_queue(sc, que->msix);
2841 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
2842 1.1 dyoung
2843 1.147 knakahar /*
2844 1.147 knakahar	 * Don't change "que->txrx_use_workqueue" from this point onward to avoid
2845 1.147 knakahar	 * flip-flopping softint/workqueue mode within one deferred processing run.
2846 1.147 knakahar */
2847 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
2848 1.147 knakahar
2849 1.1 dyoung IXGBE_TX_LOCK(txr);
2850 1.33 msaitoh ixgbe_txeof(txr);
2851 1.1 dyoung IXGBE_TX_UNLOCK(txr);
2852 1.1 dyoung
2853 1.1 dyoung /* Do AIM now? */
2854 1.1 dyoung
2855 1.333 msaitoh if (sc->enable_aim == false)
2856 1.1 dyoung goto no_calc;
2857 1.1 dyoung /*
2858 1.99 msaitoh * Do Adaptive Interrupt Moderation:
2859 1.99 msaitoh * - Write out last calculated setting
2860 1.99 msaitoh * - Calculate based on average size over
2861 1.99 msaitoh * the last interval.
2862 1.99 msaitoh */
2863 1.99 msaitoh if (que->eitr_setting)
2864 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, que->eitr_setting);
2865 1.99 msaitoh
2866 1.98 msaitoh que->eitr_setting = 0;
2867 1.1 dyoung
2868 1.98 msaitoh /* Idle, do nothing */
2869 1.186 msaitoh if ((txr->bytes == 0) && (rxr->bytes == 0))
2870 1.186 msaitoh goto no_calc;
2871 1.185 msaitoh
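	/*
	 * Estimate the average packet size from the bytes and packets
	 * seen since the last interrupt, taking the larger of the TX
	 * and RX averages.
	 */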
2872 1.1 dyoung if ((txr->bytes) && (txr->packets))
2873 1.98 msaitoh newitr = txr->bytes/txr->packets;
2874 1.1 dyoung if ((rxr->bytes) && (rxr->packets))
2875 1.165 riastrad newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2876 1.1 dyoung newitr += 24; /* account for hardware frame, crc */
2877 1.1 dyoung
2878 1.1 dyoung /* set an upper boundary */
2879 1.165 riastrad newitr = uimin(newitr, 3000);
2880 1.1 dyoung
2881 1.1 dyoung /* Be nice to the mid range */
2882 1.1 dyoung if ((newitr > 300) && (newitr < 1200))
2883 1.1 dyoung newitr = (newitr / 3);
2884 1.1 dyoung else
2885 1.1 dyoung newitr = (newitr / 2);
2886 1.1 dyoung
2887 1.124 msaitoh /*
2888 1.124 msaitoh	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
2889 1.124 msaitoh	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2890 1.124 msaitoh	 * greater than 2us on 100M (and 10M? (not documented)), but it's not
2891 1.124 msaitoh	 * on 1G and higher.
2892 1.124 msaitoh */
2893 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
2894 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL))
2895 1.124 msaitoh if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2896 1.124 msaitoh newitr = IXGBE_MIN_RSC_EITR_10G1G;
2897 1.124 msaitoh
2898 1.186 msaitoh /* save for next interrupt */
2899 1.186 msaitoh que->eitr_setting = newitr;
2900 1.1 dyoung
2901 1.98 msaitoh /* Reset state */
2902 1.98 msaitoh txr->bytes = 0;
2903 1.98 msaitoh txr->packets = 0;
2904 1.98 msaitoh rxr->bytes = 0;
2905 1.98 msaitoh rxr->packets = 0;
2906 1.1 dyoung
2907 1.1 dyoung no_calc:
2908 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
2909 1.99 msaitoh
2910 1.34 msaitoh return 1;
2911 1.99 msaitoh } /* ixgbe_msix_que */
2912 1.1 dyoung
2913 1.99 msaitoh /************************************************************************
2914 1.99 msaitoh * ixgbe_media_status - Media Ioctl callback
2915 1.98 msaitoh *
2916 1.99 msaitoh * Called whenever the user queries the status of
2917 1.99 msaitoh * the interface using ifconfig.
2918 1.99 msaitoh ************************************************************************/
2919 1.98 msaitoh static void
2920 1.98 msaitoh ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2921 1.1 dyoung {
2922 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
2923 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
2924 1.98 msaitoh int layer;
2925 1.1 dyoung
2926 1.98 msaitoh INIT_DEBUGOUT("ixgbe_media_status: begin");
2927 1.333 msaitoh ixgbe_update_link_status(sc);
2928 1.1 dyoung
2929 1.1 dyoung ifmr->ifm_status = IFM_AVALID;
2930 1.1 dyoung ifmr->ifm_active = IFM_ETHER;
2931 1.1 dyoung
2932 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
2933 1.68 msaitoh ifmr->ifm_active |= IFM_NONE;
2934 1.1 dyoung return;
2935 1.1 dyoung }
2936 1.1 dyoung
2937 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE;
2938 1.333 msaitoh layer = sc->phy_layer;
2939 1.1 dyoung
2940 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2941 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2942 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2943 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2944 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2945 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2946 1.333 msaitoh switch (sc->link_speed) {
2947 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2948 1.43 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2949 1.43 msaitoh break;
2950 1.103 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
2951 1.103 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2952 1.103 msaitoh break;
2953 1.103 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
2954 1.103 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2955 1.103 msaitoh break;
2956 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2957 1.33 msaitoh ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2958 1.43 msaitoh break;
2959 1.43 msaitoh case IXGBE_LINK_SPEED_100_FULL:
2960 1.24 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2961 1.43 msaitoh break;
2962 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
2963 1.99 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2964 1.99 msaitoh break;
2965 1.43 msaitoh }
2966 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2967 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2968 1.333 msaitoh switch (sc->link_speed) {
2969 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2970 1.43 msaitoh ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2971 1.43 msaitoh break;
2972 1.43 msaitoh }
2973 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2974 1.333 msaitoh switch (sc->link_speed) {
2975 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2976 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2977 1.43 msaitoh break;
2978 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2979 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2980 1.43 msaitoh break;
2981 1.43 msaitoh }
2982 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2983 1.333 msaitoh switch (sc->link_speed) {
2984 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2985 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2986 1.43 msaitoh break;
2987 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2988 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2989 1.43 msaitoh break;
2990 1.43 msaitoh }
2991 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2992 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2993 1.333 msaitoh switch (sc->link_speed) {
2994 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
2995 1.43 msaitoh ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2996 1.43 msaitoh break;
2997 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
2998 1.28 msaitoh ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2999 1.43 msaitoh break;
3000 1.43 msaitoh }
3001 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
3002 1.333 msaitoh switch (sc->link_speed) {
3003 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3004 1.43 msaitoh ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
3005 1.43 msaitoh break;
3006 1.43 msaitoh }
3007 1.43 msaitoh /*
3008 1.99 msaitoh * XXX: These need to use the proper media types once
3009 1.99 msaitoh * they're added.
3010 1.99 msaitoh */
3011 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
3012 1.333 msaitoh switch (sc->link_speed) {
3013 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3014 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
3015 1.48 msaitoh break;
3016 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3017 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3018 1.48 msaitoh break;
3019 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3020 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3021 1.48 msaitoh break;
3022 1.48 msaitoh }
3023 1.99 msaitoh else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
3024 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
3025 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
3026 1.333 msaitoh switch (sc->link_speed) {
3027 1.48 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
3028 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
3029 1.48 msaitoh break;
3030 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
3031 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
3032 1.48 msaitoh break;
3033 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
3034 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
3035 1.48 msaitoh break;
3036 1.48 msaitoh }
3037 1.98 msaitoh
3038 1.43 msaitoh /* If nothing is recognized... */
3039 1.43 msaitoh #if 0
3040 1.43 msaitoh if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
3041 1.43 msaitoh ifmr->ifm_active |= IFM_UNKNOWN;
3042 1.43 msaitoh #endif
3043 1.98 msaitoh
3044 1.104 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
3045 1.104 msaitoh
3046 1.44 msaitoh /* Display current flow control setting used on link */
3047 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
3048 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3049 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3050 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
3051 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full)
3052 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3053 1.1 dyoung
3054 1.1 dyoung return;
3055 1.99 msaitoh } /* ixgbe_media_status */
3056 1.1 dyoung
3057 1.99 msaitoh /************************************************************************
3058 1.99 msaitoh * ixgbe_media_change - Media Ioctl callback
3059 1.1 dyoung *
3060 1.99 msaitoh * Called when the user changes speed/duplex using
3061 1.99 msaitoh  * media/mediaopt option with ifconfig.
3062 1.99 msaitoh ************************************************************************/
3063 1.1 dyoung static int
3064 1.98 msaitoh ixgbe_media_change(struct ifnet *ifp)
3065 1.1 dyoung {
3066 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
3067 1.333 msaitoh struct ifmedia *ifm = &sc->media;
3068 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3069 1.43 msaitoh ixgbe_link_speed speed = 0;
3070 1.94 msaitoh ixgbe_link_speed link_caps = 0;
3071 1.94 msaitoh bool negotiate = false;
3072 1.94 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
3073 1.1 dyoung
3074 1.1 dyoung INIT_DEBUGOUT("ixgbe_media_change: begin");
3075 1.1 dyoung
3076 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3077 1.1 dyoung return (EINVAL);
3078 1.1 dyoung
3079 1.44 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
3080 1.144 msaitoh return (EPERM);
3081 1.44 msaitoh
3082 1.43 msaitoh /*
3083 1.99 msaitoh * We don't actually need to check against the supported
3084 1.99 msaitoh * media types of the adapter; ifmedia will take care of
3085 1.99 msaitoh * that for us.
3086 1.99 msaitoh */
3087 1.43 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) {
3088 1.98 msaitoh case IFM_AUTO:
3089 1.98 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3090 1.98 msaitoh &negotiate);
3091 1.98 msaitoh if (err != IXGBE_SUCCESS) {
3092 1.333 msaitoh device_printf(sc->dev, "Unable to determine "
3093 1.98 msaitoh "supported advertise speeds\n");
3094 1.98 msaitoh return (ENODEV);
3095 1.98 msaitoh }
3096 1.98 msaitoh speed |= link_caps;
3097 1.98 msaitoh break;
3098 1.98 msaitoh case IFM_10G_T:
3099 1.98 msaitoh case IFM_10G_LRM:
3100 1.98 msaitoh case IFM_10G_LR:
3101 1.98 msaitoh case IFM_10G_TWINAX:
3102 1.181 msaitoh case IFM_10G_SR:
3103 1.181 msaitoh case IFM_10G_CX4:
3104 1.98 msaitoh case IFM_10G_KR:
3105 1.98 msaitoh case IFM_10G_KX4:
3106 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
3107 1.98 msaitoh break;
3108 1.103 msaitoh case IFM_5000_T:
3109 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
3110 1.103 msaitoh break;
3111 1.103 msaitoh case IFM_2500_T:
3112 1.99 msaitoh case IFM_2500_KX:
3113 1.99 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3114 1.99 msaitoh break;
3115 1.98 msaitoh case IFM_1000_T:
3116 1.98 msaitoh case IFM_1000_LX:
3117 1.98 msaitoh case IFM_1000_SX:
3118 1.98 msaitoh case IFM_1000_KX:
3119 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
3120 1.98 msaitoh break;
3121 1.98 msaitoh case IFM_100_TX:
3122 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
3123 1.98 msaitoh break;
3124 1.99 msaitoh case IFM_10_T:
3125 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
3126 1.99 msaitoh break;
3127 1.140 msaitoh case IFM_NONE:
3128 1.140 msaitoh break;
3129 1.98 msaitoh default:
3130 1.98 msaitoh goto invalid;
3131 1.48 msaitoh }
3132 1.43 msaitoh
3133 1.43 msaitoh hw->mac.autotry_restart = TRUE;
3134 1.43 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
3135 1.333 msaitoh sc->advertise = 0;
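	/*
	 * For non-autoselect media, record the chosen speeds in the
	 * sc->advertise bitmap: bit 0 = 100M, bit 1 = 1G, bit 2 = 10G,
	 * bit 3 = 10M, bit 4 = 2.5G, bit 5 = 5G.
	 */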
3136 1.109 msaitoh if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3137 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3138 1.333 msaitoh sc->advertise |= 1 << 2;
3139 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3140 1.333 msaitoh sc->advertise |= 1 << 1;
3141 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3142 1.333 msaitoh sc->advertise |= 1 << 0;
3143 1.99 msaitoh if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3144 1.333 msaitoh sc->advertise |= 1 << 3;
3145 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3146 1.333 msaitoh sc->advertise |= 1 << 4;
3147 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3148 1.333 msaitoh sc->advertise |= 1 << 5;
3149 1.51 msaitoh }
3150 1.1 dyoung
3151 1.1 dyoung return (0);
3152 1.43 msaitoh
3153 1.43 msaitoh invalid:
3154 1.333 msaitoh device_printf(sc->dev, "Invalid media type!\n");
3155 1.98 msaitoh
3156 1.43 msaitoh return (EINVAL);
3157 1.99 msaitoh } /* ixgbe_media_change */
3158 1.1 dyoung
3159 1.99 msaitoh /************************************************************************
3160 1.320 msaitoh * ixgbe_msix_admin - Link status change ISR (MSI-X)
3161 1.99 msaitoh ************************************************************************/
3162 1.98 msaitoh static int
3163 1.233 msaitoh ixgbe_msix_admin(void *arg)
3164 1.98 msaitoh {
3165 1.333 msaitoh struct ixgbe_softc *sc = arg;
3166 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3167 1.277 msaitoh u32 eicr;
3168 1.273 msaitoh u32 eims_orig;
3169 1.273 msaitoh u32 eims_disable = 0;
3170 1.98 msaitoh
3171 1.333 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1);
3172 1.98 msaitoh
3173 1.273 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3174 1.273 msaitoh /* Pause other interrupts */
3175 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3176 1.273 msaitoh
3177 1.125 knakahar /*
3178 1.273 msaitoh * First get the cause.
3179 1.273 msaitoh *
3180 1.125 knakahar	 * The specifications of 82598, 82599, X540 and X550 say the EICS
3181 1.125 knakahar	 * register is write only. However, Linux reads EICS instead of EICR
3182 1.273 msaitoh	 * to get the interrupt cause, as a workaround for silicon errata.
3183 1.273 msaitoh	 * At least, reading EICR clears the lower 16 bits of EIMS on 82598.
3184 1.125 knakahar */
3185 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3186 1.98 msaitoh /* Be sure the queue bits are not cleared */
3187 1.99 msaitoh eicr &= ~IXGBE_EICR_RTX_QUEUE;
3188 1.265 msaitoh /* Clear all OTHER interrupts with write */
3189 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3190 1.1 dyoung
3191 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
3192 1.277 msaitoh
3193 1.277 msaitoh /* Re-enable some OTHER interrupts */
3194 1.277 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3195 1.277 msaitoh
3196 1.277 msaitoh return 1;
3197 1.277 msaitoh } /* ixgbe_msix_admin */
3198 1.277 msaitoh
3199 1.277 msaitoh static void
3200 1.333 msaitoh ixgbe_intr_admin_common(struct ixgbe_softc *sc, u32 eicr, u32 *eims_disable)
3201 1.277 msaitoh {
3202 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3203 1.277 msaitoh u32 task_requests = 0;
3204 1.277 msaitoh s32 retval;
3205 1.277 msaitoh
3206 1.266 msaitoh /* Link status change */
3207 1.266 msaitoh if (eicr & IXGBE_EICR_LSC) {
3208 1.266 msaitoh task_requests |= IXGBE_REQUEST_TASK_LSC;
3209 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3210 1.266 msaitoh }
3211 1.266 msaitoh
3212 1.204 msaitoh if (ixgbe_is_sfp(hw)) {
3213 1.310 msaitoh u32 eicr_mask;
3214 1.310 msaitoh
3215 1.204 msaitoh /* Pluggable optics-related interrupt */
3216 1.204 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
3217 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3218 1.204 msaitoh else
3219 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3220 1.204 msaitoh
3221 1.204 msaitoh /*
3222 1.204 msaitoh * An interrupt might not arrive when a module is inserted.
3223 1.204 msaitoh		 * When a link status change interrupt occurs and the driver
3224 1.204 msaitoh		 * still regards the SFP as unplugged, issue the module softint
3225 1.204 msaitoh		 * and then issue the LSC interrupt.
3226 1.204 msaitoh */
3227 1.204 msaitoh if ((eicr & eicr_mask)
3228 1.204 msaitoh || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3229 1.204 msaitoh && (eicr & IXGBE_EICR_LSC))) {
3230 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD;
3231 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC;
3232 1.204 msaitoh }
3233 1.204 msaitoh
3234 1.204 msaitoh if ((hw->mac.type == ixgbe_mac_82599EB) &&
3235 1.204 msaitoh (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3236 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF;
3237 1.277 msaitoh *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3238 1.204 msaitoh }
3239 1.204 msaitoh }
3240 1.204 msaitoh
3241 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3242 1.311 msaitoh #ifdef IXGBE_FDIR
3243 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3244 1.99 msaitoh (eicr & IXGBE_EICR_FLOW_DIR)) {
3245 1.333 msaitoh if (!atomic_cas_uint(&sc->fdir_reinit, 0, 1)) {
3246 1.275 msaitoh task_requests |= IXGBE_REQUEST_TASK_FDIR;
3247 1.275 msaitoh /* Disable the interrupt */
3248 1.277 msaitoh *eims_disable |= IXGBE_EIMS_FLOW_DIR;
3249 1.275 msaitoh }
3250 1.99 msaitoh }
3251 1.311 msaitoh #endif
3252 1.99 msaitoh
3253 1.99 msaitoh if (eicr & IXGBE_EICR_ECC) {
3254 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3255 1.312 msaitoh &ixgbe_errlog_intrvl))
3256 1.333 msaitoh device_printf(sc->dev,
3257 1.312 msaitoh "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3258 1.98 msaitoh }
3259 1.1 dyoung
3260 1.98 msaitoh /* Check for over temp condition */
3261 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3262 1.333 msaitoh switch (sc->hw.mac.type) {
3263 1.99 msaitoh case ixgbe_mac_X550EM_a:
3264 1.99 msaitoh if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3265 1.99 msaitoh break;
3266 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3267 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3268 1.99 msaitoh break;
3269 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3270 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3271 1.333 msaitoh device_printf(sc->dev,
3272 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3273 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3274 1.333 msaitoh device_printf(sc->dev,
3275 1.312 msaitoh "System shutdown required!\n");
3276 1.312 msaitoh }
3277 1.99 msaitoh break;
3278 1.99 msaitoh default:
3279 1.99 msaitoh if (!(eicr & IXGBE_EICR_TS))
3280 1.99 msaitoh break;
3281 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw);
3282 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP)
3283 1.99 msaitoh break;
3284 1.333 msaitoh if (ratecheck(&sc->lasterr_time,
3285 1.312 msaitoh &ixgbe_errlog_intrvl)) {
3286 1.333 msaitoh device_printf(sc->dev,
3287 1.312 msaitoh "CRITICAL: OVER TEMP!! "
3288 1.312 msaitoh "PHY IS SHUT DOWN!!\n");
3289 1.333 msaitoh device_printf(sc->dev,
3290 1.312 msaitoh "System shutdown required!\n");
3291 1.312 msaitoh }
3292 1.99 msaitoh break;
3293 1.99 msaitoh }
3294 1.1 dyoung }
3295 1.99 msaitoh
3296 1.99 msaitoh /* Check for VF message */
3297 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3298 1.233 msaitoh (eicr & IXGBE_EICR_MAILBOX)) {
3299 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MBX;
3300 1.277 msaitoh *eims_disable |= IXGBE_EIMS_MAILBOX;
3301 1.233 msaitoh }
3302 1.1 dyoung }
3303 1.1 dyoung
3304 1.98 msaitoh /* Check for fan failure */
3305 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
3306 1.333 msaitoh ixgbe_check_fan_failure(sc, eicr, true);
3307 1.1 dyoung
3308 1.98 msaitoh /* External PHY interrupt */
3309 1.99 msaitoh if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3310 1.99 msaitoh (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3311 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_PHY;
3312 1.277 msaitoh *eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3313 1.233 msaitoh }
3314 1.233 msaitoh
3315 1.233 msaitoh if (task_requests != 0) {
3316 1.333 msaitoh mutex_enter(&sc->admin_mtx);
3317 1.333 msaitoh sc->task_requests |= task_requests;
3318 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
3319 1.333 msaitoh mutex_exit(&sc->admin_mtx);
3320 1.186 msaitoh }
3321 1.277 msaitoh }
3322 1.1 dyoung
3323 1.124 msaitoh static void
3324 1.333 msaitoh ixgbe_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr)
3325 1.124 msaitoh {
3326 1.185 msaitoh
3327 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB)
3328 1.186 msaitoh itr |= itr << 16;
3329 1.186 msaitoh else
3330 1.186 msaitoh itr |= IXGBE_EITR_CNT_WDIS;
3331 1.124 msaitoh
3332 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(index), itr);
3333 1.124 msaitoh }
3334 1.124 msaitoh
3335 1.124 msaitoh
3336 1.99 msaitoh /************************************************************************
3337 1.99 msaitoh * ixgbe_sysctl_interrupt_rate_handler
3338 1.99 msaitoh ************************************************************************/
3339 1.98 msaitoh static int
3340 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3341 1.1 dyoung {
3342 1.98 msaitoh struct sysctlnode node = *rnode;
3343 1.99 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3344 1.339 msaitoh struct ixgbe_softc *sc;
3345 1.98 msaitoh uint32_t reg, usec, rate;
3346 1.98 msaitoh int error;
3347 1.45 msaitoh
3348 1.98 msaitoh if (que == NULL)
3349 1.98 msaitoh return 0;
3350 1.169 msaitoh
3351 1.333 msaitoh sc = que->sc;
3352 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
3353 1.169 msaitoh return (EPERM);
3354 1.169 msaitoh
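	/*
	 * The ITR interval lives in bits 11:3 of EITR; the driver treats it
	 * as being in 2us units, so the interrupt rate is 500000 / interval.
	 */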
3355 1.333 msaitoh reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix));
3356 1.98 msaitoh usec = ((reg & 0x0FF8) >> 3);
3357 1.98 msaitoh if (usec > 0)
3358 1.98 msaitoh rate = 500000 / usec;
3359 1.98 msaitoh else
3360 1.98 msaitoh rate = 0;
3361 1.98 msaitoh node.sysctl_data = &rate;
3362 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
3363 1.98 msaitoh if (error || newp == NULL)
3364 1.98 msaitoh return error;
3365 1.98 msaitoh reg &= ~0xfff; /* default, no limitation */
3366 1.98 msaitoh if (rate > 0 && rate < 500000) {
3367 1.98 msaitoh if (rate < 1000)
3368 1.98 msaitoh rate = 1000;
3369 1.228 msaitoh reg |= ((4000000 / rate) & 0xff8);
3370 1.124 msaitoh /*
3371 1.124 msaitoh		 * When RSC is used, the ITR interval must be larger than
3372 1.124 msaitoh		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3373 1.124 msaitoh		 * The minimum value is always greater than 2us on 100M
3374 1.124 msaitoh		 * (and 10M? (not documented)), but it's not on 1G and higher.
3375 1.124 msaitoh */
3376 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL)
3377 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3378 1.333 msaitoh if ((sc->num_queues > 1)
3379 1.124 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3380 1.124 msaitoh return EINVAL;
3381 1.124 msaitoh }
3382 1.343 msaitoh sc->max_interrupt_rate = rate;
3383 1.124 msaitoh } else
3384 1.343 msaitoh sc->max_interrupt_rate = 0;
3385 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, reg);
3386 1.99 msaitoh
3387 1.99 msaitoh return (0);
3388 1.99 msaitoh } /* ixgbe_sysctl_interrupt_rate_handler */
3389 1.45 msaitoh
3390 1.98 msaitoh const struct sysctlnode *
3391 1.333 msaitoh ixgbe_sysctl_instance(struct ixgbe_softc *sc)
3392 1.98 msaitoh {
3393 1.98 msaitoh const char *dvname;
3394 1.98 msaitoh struct sysctllog **log;
3395 1.98 msaitoh int rc;
3396 1.98 msaitoh const struct sysctlnode *rnode;
3397 1.1 dyoung
3398 1.333 msaitoh if (sc->sysctltop != NULL)
3399 1.333 msaitoh return sc->sysctltop;
3400 1.1 dyoung
3401 1.333 msaitoh log = &sc->sysctllog;
3402 1.333 msaitoh dvname = device_xname(sc->dev);
3403 1.1 dyoung
3404 1.98 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3405 1.98 msaitoh 0, CTLTYPE_NODE, dvname,
3406 1.98 msaitoh SYSCTL_DESCR("ixgbe information and settings"),
3407 1.98 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3408 1.98 msaitoh goto err;
3409 1.63 msaitoh
3410 1.98 msaitoh return rnode;
3411 1.98 msaitoh err:
3412 1.333 msaitoh device_printf(sc->dev,
3413 1.207 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3414 1.98 msaitoh return NULL;
3415 1.63 msaitoh }
3416 1.63 msaitoh
3417 1.99 msaitoh /************************************************************************
3418 1.99 msaitoh * ixgbe_add_device_sysctls
3419 1.99 msaitoh ************************************************************************/
3420 1.63 msaitoh static void
3421 1.333 msaitoh ixgbe_add_device_sysctls(struct ixgbe_softc *sc)
3422 1.1 dyoung {
3423 1.333 msaitoh device_t dev = sc->dev;
3424 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3425 1.98 msaitoh struct sysctllog **log;
3426 1.98 msaitoh const struct sysctlnode *rnode, *cnode;
3427 1.1 dyoung
3428 1.333 msaitoh log = &sc->sysctllog;
3429 1.1 dyoung
3430 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) {
3431 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl root\n");
3432 1.98 msaitoh return;
3433 1.98 msaitoh }
3434 1.1 dyoung
3435 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3436 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3437 1.158 msaitoh "debug", SYSCTL_DESCR("Debug Info"),
3438 1.333 msaitoh ixgbe_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL)
3439 1.280 msaitoh != 0)
3440 1.158 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3441 1.158 msaitoh
3442 1.158 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3443 1.286 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3444 1.286 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
3445 1.286 msaitoh ixgbe_sysctl_rx_copy_len, 0,
3446 1.333 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0)
3447 1.286 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3448 1.286 msaitoh
3449 1.286 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3450 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3451 1.314 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX descriptors"),
3452 1.333 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3453 1.314 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3454 1.314 msaitoh
3455 1.314 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3456 1.314 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3457 1.314 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"),
3458 1.333 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3459 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3460 1.1 dyoung
3461 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3462 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit",
3463 1.313 msaitoh SYSCTL_DESCR("max number of RX packets to process"),
3464 1.333 msaitoh ixgbe_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3465 1.313 msaitoh CTL_EOL) != 0)
3466 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3467 1.313 msaitoh
3468 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3469 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit",
3470 1.313 msaitoh SYSCTL_DESCR("max number of TX packets to process"),
3471 1.333 msaitoh ixgbe_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE,
3472 1.313 msaitoh CTL_EOL) != 0)
3473 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3474 1.313 msaitoh
3475 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3476 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT,
3477 1.98 msaitoh "num_queues", SYSCTL_DESCR("Number of queues"),
3478 1.333 msaitoh NULL, 0, &sc->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3479 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3480 1.43 msaitoh
3481 1.98 msaitoh /* Sysctls for all devices */
3482 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3483 1.99 msaitoh CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3484 1.333 msaitoh ixgbe_sysctl_flowcntl, 0, (void *)sc, 0, CTL_CREATE,
3485 1.99 msaitoh CTL_EOL) != 0)
3486 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3487 1.63 msaitoh
3488 1.333 msaitoh sc->enable_aim = ixgbe_enable_aim;
3489 1.343 msaitoh sc->max_interrupt_rate = ixgbe_max_interrupt_rate;
3490 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3491 1.99 msaitoh CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3492 1.333 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3493 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3494 1.1 dyoung
3495 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode,
3496 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT,
3497 1.98 msaitoh "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3498 1.333 msaitoh ixgbe_sysctl_advertise, 0, (void *)sc, 0, CTL_CREATE,
3499 1.99 msaitoh CTL_EOL) != 0)
3500 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3501 1.1 dyoung
3502 1.147 knakahar /*
3503 1.147 knakahar	 * If each "que->txrx_use_workqueue" were changed in the sysctl
3504 1.147 knakahar	 * handler, it would cause flip-flopping between softint and
3505 1.147 knakahar	 * workqueue mode within one deferred processing run. Preventing
3506 1.147 knakahar	 * that would require preempt_disable()/preempt_enable() in
3507 1.147 knakahar	 * ixgbe_sched_handle_que() to avoid hitting the KASSERT in
3508 1.147 knakahar	 * softint_schedule(). I think changing "que->txrx_use_workqueue"
3509 1.147 knakahar	 * in the interrupt handler is lighter than doing
3510 1.147 knakahar	 * preempt_disable()/preempt_enable() in every ixgbe_sched_handle_que().
3511 1.147 knakahar */
3512 1.333 msaitoh sc->txrx_use_workqueue = ixgbe_txrx_workqueue;
3513 1.128 knakahar if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3514 1.280 msaitoh CTLTYPE_BOOL, "txrx_workqueue",
3515 1.280 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"),
3516 1.333 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE,
3517 1.280 msaitoh CTL_EOL) != 0)
3518 1.128 knakahar aprint_error_dev(dev, "could not create sysctl\n");
3519 1.128 knakahar
3520 1.98 msaitoh #ifdef IXGBE_DEBUG
3521 1.98 msaitoh /* testing sysctls (for all devices) */
3522 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3523 1.99 msaitoh CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3524 1.333 msaitoh ixgbe_sysctl_power_state, 0, (void *)sc, 0, CTL_CREATE,
3525 1.99 msaitoh CTL_EOL) != 0)
3526 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3527 1.45 msaitoh
3528 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3529 1.99 msaitoh CTLTYPE_STRING, "print_rss_config",
3530 1.99 msaitoh SYSCTL_DESCR("Prints RSS Configuration"),
3531 1.333 msaitoh ixgbe_sysctl_print_rss_config, 0, (void *)sc, 0, CTL_CREATE,
3532 1.99 msaitoh CTL_EOL) != 0)
3533 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3534 1.98 msaitoh #endif
3535 1.98 msaitoh /* for X550 series devices */
3536 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3537 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3538 1.99 msaitoh CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3539 1.333 msaitoh ixgbe_sysctl_dmac, 0, (void *)sc, 0, CTL_CREATE,
3540 1.99 msaitoh CTL_EOL) != 0)
3541 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3542 1.1 dyoung
3543 1.98 msaitoh /* for WoL-capable devices */
3544 1.333 msaitoh if (sc->wol_support) {
3545 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3546 1.99 msaitoh CTLTYPE_BOOL, "wol_enable",
3547 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3548 1.333 msaitoh ixgbe_sysctl_wol_enable, 0, (void *)sc, 0, CTL_CREATE,
3549 1.99 msaitoh CTL_EOL) != 0)
3550 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3551 1.1 dyoung
3552 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3553 1.99 msaitoh CTLTYPE_INT, "wufc",
3554 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3555 1.333 msaitoh ixgbe_sysctl_wufc, 0, (void *)sc, 0, CTL_CREATE,
3556 1.99 msaitoh CTL_EOL) != 0)
3557 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3558 1.98 msaitoh }
3559 1.1 dyoung
3560 1.98 msaitoh /* for X552/X557-AT devices */
3561 1.325 msaitoh if ((hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) ||
3562 1.325 msaitoh (hw->device_id == IXGBE_DEV_ID_X550EM_A_10G_T)) {
3563 1.98 msaitoh const struct sysctlnode *phy_node;
3564 1.1 dyoung
3565 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3566 1.98 msaitoh "phy", SYSCTL_DESCR("External PHY sysctls"),
3567 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3568 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3569 1.98 msaitoh return;
3570 1.98 msaitoh }
3571 1.1 dyoung
3572 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3573 1.99 msaitoh CTLTYPE_INT, "temp",
3574 1.99 msaitoh SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3575 1.333 msaitoh ixgbe_sysctl_phy_temp, 0, (void *)sc, 0, CTL_CREATE,
3576 1.99 msaitoh CTL_EOL) != 0)
3577 1.99 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3578 1.99 msaitoh
3579 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3580 1.99 msaitoh CTLTYPE_INT, "overtemp_occurred",
3581 1.280 msaitoh SYSCTL_DESCR(
3582 1.280 msaitoh "External PHY High Temperature Event Occurred"),
3583 1.333 msaitoh ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)sc, 0,
3584 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0)
3585 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3586 1.99 msaitoh }
3587 1.33 msaitoh
3588 1.163 msaitoh if ((hw->mac.type == ixgbe_mac_X550EM_a)
3589 1.163 msaitoh && (hw->phy.type == ixgbe_phy_fw))
3590 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3591 1.163 msaitoh CTLTYPE_BOOL, "force_10_100_autonego",
3592 1.163 msaitoh SYSCTL_DESCR("Force autonego on 10M and 100M"),
3593 1.163 msaitoh NULL, 0, &hw->phy.force_10_100_autonego, 0,
3594 1.163 msaitoh CTL_CREATE, CTL_EOL) != 0)
3595 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3596 1.163 msaitoh
3597 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) {
3598 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3599 1.99 msaitoh CTLTYPE_INT, "eee_state",
3600 1.99 msaitoh SYSCTL_DESCR("EEE Power Save State"),
3601 1.333 msaitoh ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE,
3602 1.99 msaitoh CTL_EOL) != 0)
3603 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n");
3604 1.98 msaitoh }
3605 1.99 msaitoh } /* ixgbe_add_device_sysctls */
3606 1.1 dyoung
3607 1.99 msaitoh /************************************************************************
3608 1.99 msaitoh * ixgbe_allocate_pci_resources
3609 1.99 msaitoh ************************************************************************/
3610 1.98 msaitoh static int
3611 1.333 msaitoh ixgbe_allocate_pci_resources(struct ixgbe_softc *sc,
3612 1.98 msaitoh const struct pci_attach_args *pa)
3613 1.1 dyoung {
3614 1.346 msaitoh pcireg_t memtype, csr;
3615 1.333 msaitoh device_t dev = sc->dev;
3616 1.98 msaitoh bus_addr_t addr;
3617 1.98 msaitoh int flags;
3618 1.1 dyoung
3619 1.98 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3620 1.98 msaitoh switch (memtype) {
3621 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3622 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3623 1.333 msaitoh sc->osdep.mem_bus_space_tag = pa->pa_memt;
3624 1.98 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3625 1.333 msaitoh memtype, &addr, &sc->osdep.mem_size, &flags) != 0)
3626 1.98 msaitoh goto map_err;
3627 1.98 msaitoh if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3628 1.98 msaitoh aprint_normal_dev(dev, "clearing prefetchable bit\n");
3629 1.98 msaitoh flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3630 1.98 msaitoh }
3631 1.333 msaitoh if (bus_space_map(sc->osdep.mem_bus_space_tag, addr,
3632 1.333 msaitoh sc->osdep.mem_size, flags,
3633 1.333 msaitoh &sc->osdep.mem_bus_space_handle) != 0) {
3634 1.98 msaitoh map_err:
3635 1.333 msaitoh sc->osdep.mem_size = 0;
3636 1.98 msaitoh aprint_error_dev(dev, "unable to map BAR0\n");
3637 1.98 msaitoh return ENXIO;
3638 1.98 msaitoh }
3639 1.171 msaitoh /*
3640 1.171 msaitoh * Enable address decoding for memory range in case BIOS or
3641 1.171 msaitoh * UEFI don't set it.
3642 1.171 msaitoh */
3643 1.171 msaitoh csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3644 1.171 msaitoh PCI_COMMAND_STATUS_REG);
3645 1.171 msaitoh csr |= PCI_COMMAND_MEM_ENABLE;
3646 1.171 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3647 1.171 msaitoh csr);
3648 1.98 msaitoh break;
3649 1.98 msaitoh default:
3650 1.98 msaitoh aprint_error_dev(dev, "unexpected type on BAR0\n");
3651 1.98 msaitoh return ENXIO;
3652 1.98 msaitoh }
3653 1.1 dyoung
3654 1.98 msaitoh return (0);
3655 1.99 msaitoh } /* ixgbe_allocate_pci_resources */
3656 1.1 dyoung
3657 1.119 msaitoh static void
3658 1.333 msaitoh ixgbe_free_deferred_handlers(struct ixgbe_softc *sc)
3659 1.119 msaitoh {
3660 1.333 msaitoh struct ix_queue *que = sc->queues;
3661 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3662 1.119 msaitoh int i;
3663 1.119 msaitoh
3664 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) {
3665 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3666 1.119 msaitoh if (txr->txr_si != NULL)
3667 1.119 msaitoh softint_disestablish(txr->txr_si);
3668 1.119 msaitoh }
3669 1.119 msaitoh if (que->que_si != NULL)
3670 1.119 msaitoh softint_disestablish(que->que_si);
3671 1.119 msaitoh }
3672 1.333 msaitoh if (sc->txr_wq != NULL)
3673 1.333 msaitoh workqueue_destroy(sc->txr_wq);
3674 1.333 msaitoh if (sc->txr_wq_enqueued != NULL)
3675 1.333 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int));
3676 1.333 msaitoh if (sc->que_wq != NULL)
3677 1.333 msaitoh workqueue_destroy(sc->que_wq);
3678 1.333 msaitoh
3679 1.333 msaitoh if (sc->admin_wq != NULL) {
3680 1.333 msaitoh workqueue_destroy(sc->admin_wq);
3681 1.333 msaitoh sc->admin_wq = NULL;
3682 1.333 msaitoh }
3683 1.333 msaitoh if (sc->timer_wq != NULL) {
3684 1.333 msaitoh workqueue_destroy(sc->timer_wq);
3685 1.333 msaitoh sc->timer_wq = NULL;
3686 1.233 msaitoh }
3687 1.333 msaitoh if (sc->recovery_mode_timer_wq != NULL) {
3688 1.236 msaitoh /*
3689 1.236 msaitoh 		 * ixgbe_ifstop() doesn't call workqueue_wait() for
3690 1.236 msaitoh * the recovery_mode_timer workqueue, so call it here.
3691 1.236 msaitoh */
3692 1.333 msaitoh workqueue_wait(sc->recovery_mode_timer_wq,
3693 1.333 msaitoh &sc->recovery_mode_timer_wc);
3694 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
3695 1.333 msaitoh workqueue_destroy(sc->recovery_mode_timer_wq);
3696 1.333 msaitoh sc->recovery_mode_timer_wq = NULL;
3697 1.119 msaitoh }
3698 1.257 msaitoh } /* ixgbe_free_deferred_handlers */
3699 1.119 msaitoh
3700 1.99 msaitoh /************************************************************************
3701 1.99 msaitoh * ixgbe_detach - Device removal routine
3702 1.1 dyoung *
3703 1.99 msaitoh * Called when the driver is being removed.
3704 1.99 msaitoh * Stops the adapter and deallocates all the resources
3705 1.99 msaitoh * that were allocated for driver operation.
3706 1.1 dyoung *
3707 1.99 msaitoh * return 0 on success, positive on failure
3708 1.99 msaitoh ************************************************************************/
3709 1.98 msaitoh static int
3710 1.98 msaitoh ixgbe_detach(device_t dev, int flags)
3711 1.1 dyoung {
3712 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3713 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings;
3714 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
3715 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3716 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf;
3717 1.98 msaitoh u32 ctrl_ext;
3718 1.168 msaitoh int i;
3719 1.28 msaitoh
3720 1.98 msaitoh INIT_DEBUGOUT("ixgbe_detach: begin");
3721 1.333 msaitoh if (sc->osdep.attached == false)
3722 1.98 msaitoh return 0;
3723 1.26 msaitoh
3724 1.99 msaitoh if (ixgbe_pci_iov_detach(dev) != 0) {
3725 1.99 msaitoh device_printf(dev, "SR-IOV in use; detach first.\n");
3726 1.99 msaitoh return (EBUSY);
3727 1.99 msaitoh }
3728 1.99 msaitoh
3729 1.333 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) &&
3730 1.293 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) {
3731 1.99 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n");
3732 1.99 msaitoh return (EBUSY);
3733 1.26 msaitoh }
3734 1.293 yamaguch
3735 1.333 msaitoh ether_ifdetach(sc->ifp);
3736 1.24 msaitoh
3737 1.333 msaitoh sc->osdep.detaching = true;
3738 1.241 msaitoh /*
3739 1.252 msaitoh * Stop the interface. ixgbe_setup_low_power_mode() calls
3740 1.253 msaitoh * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
3741 1.252 msaitoh * directly.
3742 1.241 msaitoh */
3743 1.333 msaitoh ixgbe_setup_low_power_mode(sc);
3744 1.241 msaitoh
3745 1.333 msaitoh callout_halt(&sc->timer, NULL);
3746 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3747 1.333 msaitoh callout_halt(&sc->recovery_mode_timer, NULL);
3748 1.333 msaitoh
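	/*
	 * Drain any admin/timer work that is already queued and reset the
	 * pending flags; the callouts were halted above, so nothing can
	 * schedule new work while the rest of the state is torn down.
	 */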
3749 1.333 msaitoh workqueue_wait(sc->admin_wq, &sc->admin_wc);
3750 1.333 msaitoh atomic_store_relaxed(&sc->admin_pending, 0);
3751 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
3752 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
3753 1.241 msaitoh
3754 1.98 msaitoh pmf_device_deregister(dev);
3755 1.26 msaitoh
3756 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
3757 1.185 msaitoh
3758 1.98 msaitoh /* let hardware know driver is unloading */
3759 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
3760 1.98 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3761 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
3762 1.24 msaitoh
3763 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP)
3764 1.333 msaitoh netmap_detach(sc->ifp);
3765 1.99 msaitoh
3766 1.333 msaitoh ixgbe_free_pci_resources(sc);
3767 1.98 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */
3768 1.98 msaitoh bus_generic_detach(dev);
3769 1.98 msaitoh #endif
3770 1.333 msaitoh if_detach(sc->ifp);
3771 1.333 msaitoh ifmedia_fini(&sc->media);
3772 1.333 msaitoh if_percpuq_destroy(sc->ipq);
3773 1.333 msaitoh
3774 1.333 msaitoh sysctl_teardown(&sc->sysctllog);
3775 1.333 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup);
3776 1.333 msaitoh evcnt_detach(&sc->mbuf_defrag_failed);
3777 1.333 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup);
3778 1.333 msaitoh evcnt_detach(&sc->einval_tx_dma_setup);
3779 1.333 msaitoh evcnt_detach(&sc->other_tx_dma_setup);
3780 1.333 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup);
3781 1.333 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup);
3782 1.333 msaitoh evcnt_detach(&sc->watchdog_events);
3783 1.333 msaitoh evcnt_detach(&sc->tso_err);
3784 1.333 msaitoh evcnt_detach(&sc->admin_irqev);
3785 1.333 msaitoh evcnt_detach(&sc->link_workev);
3786 1.333 msaitoh evcnt_detach(&sc->mod_workev);
3787 1.333 msaitoh evcnt_detach(&sc->msf_workev);
3788 1.333 msaitoh evcnt_detach(&sc->phy_workev);
3789 1.1 dyoung
3790 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3791 1.98 msaitoh if (i < __arraycount(stats->mpc)) {
3792 1.98 msaitoh evcnt_detach(&stats->mpc[i]);
3793 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
3794 1.98 msaitoh evcnt_detach(&stats->rnbc[i]);
3795 1.98 msaitoh }
3796 1.98 msaitoh if (i < __arraycount(stats->pxontxc)) {
3797 1.98 msaitoh evcnt_detach(&stats->pxontxc[i]);
3798 1.98 msaitoh evcnt_detach(&stats->pxonrxc[i]);
3799 1.98 msaitoh evcnt_detach(&stats->pxofftxc[i]);
3800 1.98 msaitoh evcnt_detach(&stats->pxoffrxc[i]);
3801 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3802 1.151 msaitoh evcnt_detach(&stats->pxon2offc[i]);
3803 1.98 msaitoh }
3804 1.168 msaitoh }
3805 1.168 msaitoh
3806 1.333 msaitoh txr = sc->tx_rings;
3807 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) {
3808 1.333 msaitoh evcnt_detach(&sc->queues[i].irqs);
3809 1.333 msaitoh evcnt_detach(&sc->queues[i].handleq);
3810 1.333 msaitoh evcnt_detach(&sc->queues[i].req);
3811 1.168 msaitoh evcnt_detach(&txr->total_packets);
3812 1.168 msaitoh #ifndef IXGBE_LEGACY_TX
3813 1.168 msaitoh evcnt_detach(&txr->pcq_drops);
3814 1.168 msaitoh #endif
3815 1.327 msaitoh evcnt_detach(&txr->no_desc_avail);
3816 1.327 msaitoh evcnt_detach(&txr->tso_tx);
3817 1.168 msaitoh
3818 1.98 msaitoh if (i < __arraycount(stats->qprc)) {
3819 1.98 msaitoh evcnt_detach(&stats->qprc[i]);
3820 1.98 msaitoh evcnt_detach(&stats->qptc[i]);
3821 1.98 msaitoh evcnt_detach(&stats->qbrc[i]);
3822 1.98 msaitoh evcnt_detach(&stats->qbtc[i]);
3823 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB)
3824 1.151 msaitoh evcnt_detach(&stats->qprdc[i]);
3825 1.34 msaitoh }
3826 1.98 msaitoh
3827 1.98 msaitoh evcnt_detach(&rxr->rx_packets);
3828 1.98 msaitoh evcnt_detach(&rxr->rx_bytes);
3829 1.98 msaitoh evcnt_detach(&rxr->rx_copies);
3830 1.290 msaitoh evcnt_detach(&rxr->no_mbuf);
3831 1.98 msaitoh evcnt_detach(&rxr->rx_discarded);
3832 1.1 dyoung }
3833 1.98 msaitoh evcnt_detach(&stats->ipcs);
3834 1.98 msaitoh evcnt_detach(&stats->l4cs);
3835 1.98 msaitoh evcnt_detach(&stats->ipcs_bad);
3836 1.98 msaitoh evcnt_detach(&stats->l4cs_bad);
3837 1.98 msaitoh evcnt_detach(&stats->intzero);
3838 1.98 msaitoh evcnt_detach(&stats->legint);
3839 1.98 msaitoh evcnt_detach(&stats->crcerrs);
3840 1.98 msaitoh evcnt_detach(&stats->illerrc);
3841 1.98 msaitoh evcnt_detach(&stats->errbc);
3842 1.98 msaitoh evcnt_detach(&stats->mspdc);
3843 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550)
3844 1.98 msaitoh evcnt_detach(&stats->mbsdc);
3845 1.98 msaitoh evcnt_detach(&stats->mpctotal);
3846 1.98 msaitoh evcnt_detach(&stats->mlfc);
3847 1.98 msaitoh evcnt_detach(&stats->mrfc);
3848 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a)
3849 1.326 msaitoh evcnt_detach(&stats->link_dn_cnt);
3850 1.98 msaitoh evcnt_detach(&stats->rlec);
3851 1.98 msaitoh evcnt_detach(&stats->lxontxc);
3852 1.98 msaitoh evcnt_detach(&stats->lxonrxc);
3853 1.98 msaitoh evcnt_detach(&stats->lxofftxc);
3854 1.98 msaitoh evcnt_detach(&stats->lxoffrxc);
3855 1.98 msaitoh
3856 1.98 msaitoh /* Packet Reception Stats */
3857 1.98 msaitoh evcnt_detach(&stats->tor);
3858 1.98 msaitoh evcnt_detach(&stats->gorc);
3859 1.98 msaitoh evcnt_detach(&stats->tpr);
3860 1.98 msaitoh evcnt_detach(&stats->gprc);
3861 1.98 msaitoh evcnt_detach(&stats->mprc);
3862 1.98 msaitoh evcnt_detach(&stats->bprc);
3863 1.98 msaitoh evcnt_detach(&stats->prc64);
3864 1.98 msaitoh evcnt_detach(&stats->prc127);
3865 1.98 msaitoh evcnt_detach(&stats->prc255);
3866 1.98 msaitoh evcnt_detach(&stats->prc511);
3867 1.98 msaitoh evcnt_detach(&stats->prc1023);
3868 1.98 msaitoh evcnt_detach(&stats->prc1522);
3869 1.98 msaitoh evcnt_detach(&stats->ruc);
3870 1.98 msaitoh evcnt_detach(&stats->rfc);
3871 1.98 msaitoh evcnt_detach(&stats->roc);
3872 1.98 msaitoh evcnt_detach(&stats->rjc);
3873 1.98 msaitoh evcnt_detach(&stats->mngprc);
3874 1.98 msaitoh evcnt_detach(&stats->mngpdc);
3875 1.98 msaitoh evcnt_detach(&stats->xec);
3876 1.1 dyoung
3877 1.98 msaitoh /* Packet Transmission Stats */
3878 1.98 msaitoh evcnt_detach(&stats->gotc);
3879 1.98 msaitoh evcnt_detach(&stats->tpt);
3880 1.98 msaitoh evcnt_detach(&stats->gptc);
3881 1.98 msaitoh evcnt_detach(&stats->bptc);
3882 1.98 msaitoh evcnt_detach(&stats->mptc);
3883 1.98 msaitoh evcnt_detach(&stats->mngptc);
3884 1.98 msaitoh evcnt_detach(&stats->ptc64);
3885 1.98 msaitoh evcnt_detach(&stats->ptc127);
3886 1.98 msaitoh evcnt_detach(&stats->ptc255);
3887 1.98 msaitoh evcnt_detach(&stats->ptc511);
3888 1.98 msaitoh evcnt_detach(&stats->ptc1023);
3889 1.98 msaitoh evcnt_detach(&stats->ptc1522);
3890 1.1 dyoung
3891 1.333 msaitoh ixgbe_free_queues(sc);
3892 1.333 msaitoh free(sc->mta, M_DEVBUF);
3893 1.1 dyoung
3894 1.333 msaitoh mutex_destroy(&sc->admin_mtx); /* XXX appropriate order? */
3895 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc);
3896 1.1 dyoung
3897 1.1 dyoung return (0);
3898 1.99 msaitoh } /* ixgbe_detach */
3899 1.1 dyoung
3900 1.99 msaitoh /************************************************************************
3901 1.99 msaitoh * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3902 1.99 msaitoh *
3903 1.99 msaitoh * Prepare the adapter/port for LPLU and/or WoL
3904 1.99 msaitoh ************************************************************************/
3905 1.1 dyoung static int
3906 1.333 msaitoh ixgbe_setup_low_power_mode(struct ixgbe_softc *sc)
3907 1.1 dyoung {
3908 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
3909 1.333 msaitoh device_t dev = sc->dev;
3910 1.333 msaitoh struct ifnet *ifp = sc->ifp;
3911 1.186 msaitoh s32 error = 0;
3912 1.98 msaitoh
3913 1.98 msaitoh /* Limit power management flow to X550EM baseT */
3914 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3915 1.99 msaitoh hw->phy.ops.enter_lplu) {
3916 1.98 msaitoh /* X550EM baseT adapters need a special LPLU flow */
3917 1.98 msaitoh hw->phy.reset_disable = true;
3918 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3919 1.98 msaitoh error = hw->phy.ops.enter_lplu(hw);
3920 1.98 msaitoh if (error)
3921 1.98 msaitoh device_printf(dev,
3922 1.98 msaitoh "Error entering LPLU: %d\n", error);
3923 1.98 msaitoh hw->phy.reset_disable = false;
3924 1.98 msaitoh } else {
3925 1.98 msaitoh /* Just stop for other adapters */
3926 1.253 msaitoh ixgbe_ifstop(ifp, 1);
3927 1.33 msaitoh }
3928 1.1 dyoung
3929 1.333 msaitoh IXGBE_CORE_LOCK(sc);
3930 1.253 msaitoh
3931 1.98 msaitoh if (!hw->wol_enabled) {
3932 1.98 msaitoh ixgbe_set_phy_power(hw, FALSE);
3933 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3934 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3935 1.98 msaitoh } else {
3936 1.98 msaitoh /* Turn off support for APM wakeup. (Using ACPI instead) */
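		/*
		 * The '& ~(u32)2' below clears bit 1 of GRC which, as far
		 * as the register layout goes, is the APM enable (APME)
		 * bit.
		 */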
3937 1.166 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3938 1.166 msaitoh IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3939 1.34 msaitoh
3940 1.35 msaitoh /*
3941 1.98 msaitoh * Clear Wake Up Status register to prevent any previous wakeup
3942 1.98 msaitoh * events from waking us up immediately after we suspend.
3943 1.33 msaitoh */
3944 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3945 1.98 msaitoh
3946 1.1 dyoung /*
3947 1.98 msaitoh * Program the Wakeup Filter Control register with user filter
3948 1.98 msaitoh * settings
3949 1.33 msaitoh */
3950 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);
3951 1.98 msaitoh
3952 1.98 msaitoh /* Enable wakeups and power management in Wakeup Control */
3953 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC,
3954 1.98 msaitoh IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3955 1.1 dyoung }
3956 1.1 dyoung
3957 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
3958 1.253 msaitoh
3959 1.98 msaitoh return error;
3960 1.99 msaitoh } /* ixgbe_setup_low_power_mode */
3961 1.98 msaitoh
3962 1.99 msaitoh /************************************************************************
3963 1.99 msaitoh * ixgbe_shutdown - Shutdown entry point
3964 1.99 msaitoh ************************************************************************/
3965 1.98 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3966 1.98 msaitoh static int
3967 1.98 msaitoh ixgbe_shutdown(device_t dev)
3968 1.98 msaitoh {
3969 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3970 1.98 msaitoh int error = 0;
3971 1.34 msaitoh
3972 1.98 msaitoh INIT_DEBUGOUT("ixgbe_shutdown: begin");
3973 1.34 msaitoh
3974 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3975 1.1 dyoung
3976 1.98 msaitoh return (error);
3977 1.99 msaitoh } /* ixgbe_shutdown */
3978 1.98 msaitoh #endif
3979 1.1 dyoung
3980 1.99 msaitoh /************************************************************************
3981 1.99 msaitoh * ixgbe_suspend
3982 1.99 msaitoh *
3983 1.99 msaitoh * From D0 to D3
3984 1.99 msaitoh ************************************************************************/
3985 1.98 msaitoh static bool
3986 1.98 msaitoh ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3987 1.1 dyoung {
3988 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
3989 1.186 msaitoh int error = 0;
3990 1.98 msaitoh
3991 1.98 msaitoh INIT_DEBUGOUT("ixgbe_suspend: begin");
3992 1.98 msaitoh
3993 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc);
3994 1.1 dyoung
3995 1.98 msaitoh return (error);
3996 1.99 msaitoh } /* ixgbe_suspend */
3997 1.1 dyoung
3998 1.99 msaitoh /************************************************************************
3999 1.99 msaitoh * ixgbe_resume
4000 1.99 msaitoh *
4001 1.99 msaitoh * From D3 to D0
4002 1.99 msaitoh ************************************************************************/
4003 1.98 msaitoh static bool
4004 1.98 msaitoh ixgbe_resume(device_t dev, const pmf_qual_t *qual)
4005 1.98 msaitoh {
4006 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev);
4007 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4008 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4009 1.186 msaitoh u32 wus;
4010 1.1 dyoung
4011 1.98 msaitoh INIT_DEBUGOUT("ixgbe_resume: begin");
4012 1.33 msaitoh
4013 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4014 1.43 msaitoh
4015 1.98 msaitoh /* Read & clear WUS register */
4016 1.98 msaitoh wus = IXGBE_READ_REG(hw, IXGBE_WUS);
4017 1.98 msaitoh if (wus)
4018 1.98 msaitoh device_printf(dev, "Woken up by (WUS): %#010x\n",
4019 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_WUS));
4020 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4021 1.98 msaitoh /* And clear WUFC until next low-power transition */
4022 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4023 1.1 dyoung
4024 1.1 dyoung /*
4025 1.98 msaitoh * Required after D3->D0 transition;
4026 1.98 msaitoh * will re-advertise all previous advertised speeds
4027 1.98 msaitoh */
4028 1.98 msaitoh if (ifp->if_flags & IFF_UP)
4029 1.333 msaitoh ixgbe_init_locked(sc);
4030 1.34 msaitoh
4031 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4032 1.1 dyoung
4033 1.98 msaitoh return true;
4034 1.99 msaitoh } /* ixgbe_resume */
4035 1.1 dyoung
4036 1.98 msaitoh /*
4037 1.98 msaitoh * Set the various hardware offload abilities.
4038 1.98 msaitoh *
4039 1.98 msaitoh * This takes the ifnet's if_capenable flags (e.g. set by the user using
4040 1.98 msaitoh * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
4041 1.98 msaitoh * mbuf offload flags the driver will understand.
4042 1.98 msaitoh */
4043 1.1 dyoung static void
4044 1.333 msaitoh ixgbe_set_if_hwassist(struct ixgbe_softc *sc)
4045 1.1 dyoung {
4046 1.98 msaitoh /* XXX */
4047 1.1 dyoung }
4048 1.1 dyoung
4049 1.99 msaitoh /************************************************************************
4050 1.99 msaitoh * ixgbe_init_locked - Init entry point
4051 1.99 msaitoh *
4052 1.99 msaitoh * Used in two ways: It is used by the stack as an init
4053 1.99 msaitoh * entry point in network interface structure. It is also
4054 1.99 msaitoh * used by the driver as a hw/sw initialization routine to
4055 1.99 msaitoh * get to a consistent state.
4056 1.1 dyoung *
4057 1.99 msaitoh  * Called with the core lock (sc->core_mtx) held.
4058 1.99 msaitoh ************************************************************************/
4059 1.98 msaitoh static void
4060 1.333 msaitoh ixgbe_init_locked(struct ixgbe_softc *sc)
4061 1.1 dyoung {
4062 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4063 1.333 msaitoh device_t dev = sc->dev;
4064 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4065 1.157 msaitoh struct ix_queue *que;
4066 1.186 msaitoh struct tx_ring *txr;
4067 1.186 msaitoh struct rx_ring *rxr;
4068 1.98 msaitoh u32 txdctl, mhadd;
4069 1.98 msaitoh u32 rxdctl, rxctrl;
4070 1.186 msaitoh u32 ctrl_ext;
4071 1.219 msaitoh bool unsupported_sfp = false;
4072 1.283 msaitoh int i, j, error;
4073 1.1 dyoung
4074 1.98 msaitoh /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
4075 1.1 dyoung
4076 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4077 1.98 msaitoh INIT_DEBUGOUT("ixgbe_init_locked: begin");
4078 1.1 dyoung
4079 1.219 msaitoh hw->need_unsupported_sfp_recovery = false;
4080 1.98 msaitoh hw->adapter_stopped = FALSE;
4081 1.98 msaitoh ixgbe_stop_adapter(hw);
4082 1.333 msaitoh callout_stop(&sc->timer);
4083 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4084 1.333 msaitoh callout_stop(&sc->recovery_mode_timer);
4085 1.333 msaitoh for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
4086 1.157 msaitoh que->disabled_count = 0;
4087 1.1 dyoung
4088 1.98 msaitoh /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
4089 1.333 msaitoh sc->max_frame_size =
4090 1.98 msaitoh ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4091 1.1 dyoung
4092 1.98 msaitoh /* Queue indices may change with IOV mode */
4093 1.333 msaitoh ixgbe_align_all_queue_indices(sc);
4094 1.99 msaitoh
4095 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
4096 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);
4097 1.1 dyoung
4098 1.98 msaitoh /* Get the latest mac address, User can use a LAA */
4099 1.98 msaitoh memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
4100 1.98 msaitoh IXGBE_ETH_LENGTH_OF_ADDRESS);
4101 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
4102 1.98 msaitoh hw->addr_ctrl.rar_used_count = 1;
4103 1.1 dyoung
4104 1.98 msaitoh /* Set hardware offload abilities from ifnet flags */
4105 1.333 msaitoh ixgbe_set_if_hwassist(sc);
4106 1.48 msaitoh
4107 1.98 msaitoh /* Prepare transmit descriptors and buffers */
4108 1.333 msaitoh if (ixgbe_setup_transmit_structures(sc)) {
4109 1.98 msaitoh device_printf(dev, "Could not setup transmit structures\n");
4110 1.333 msaitoh ixgbe_stop_locked(sc);
4111 1.98 msaitoh return;
4112 1.98 msaitoh }
4113 1.1 dyoung
4114 1.98 msaitoh ixgbe_init_hw(hw);
4115 1.144 msaitoh
4116 1.333 msaitoh ixgbe_initialize_iov(sc);
4117 1.144 msaitoh
4118 1.333 msaitoh ixgbe_initialize_transmit_units(sc);
4119 1.1 dyoung
4120 1.98 msaitoh /* Setup Multicast table */
4121 1.333 msaitoh ixgbe_set_rxfilter(sc);
4122 1.43 msaitoh
4123 1.289 msaitoh /* Use fixed buffer size, even for jumbo frames */
4124 1.333 msaitoh sc->rx_mbuf_sz = MCLBYTES;
4125 1.43 msaitoh
4126 1.98 msaitoh /* Prepare receive descriptors and buffers */
4127 1.333 msaitoh error = ixgbe_setup_receive_structures(sc);
4128 1.283 msaitoh if (error) {
4129 1.283 msaitoh device_printf(dev,
4130 1.283 msaitoh "Could not setup receive structures (err = %d)\n", error);
4131 1.333 msaitoh ixgbe_stop_locked(sc);
4132 1.98 msaitoh return;
4133 1.98 msaitoh }
4134 1.43 msaitoh
4135 1.98 msaitoh /* Configure RX settings */
4136 1.333 msaitoh ixgbe_initialize_receive_units(sc);
4137 1.43 msaitoh
4138 1.233 msaitoh 	/* Initialize the variable holding task enqueue requests from interrupts */
4139 1.333 msaitoh sc->task_requests = 0;
4140 1.233 msaitoh
4141 1.99 msaitoh /* Enable SDP & MSI-X interrupts based on adapter */
4142 1.333 msaitoh ixgbe_config_gpie(sc);
4143 1.43 msaitoh
4144 1.98 msaitoh /* Set MTU size */
4145 1.98 msaitoh if (ifp->if_mtu > ETHERMTU) {
4146 1.98 msaitoh /* aka IXGBE_MAXFRS on 82599 and newer */
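		/*
		 * MAXFRS lives in the upper 16 bits of MHADD
		 * (IXGBE_MHADD_MFS_SHIFT is 16); received frames longer
		 * than this are treated as oversize by the MAC.
		 */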
4147 1.98 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4148 1.98 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
4149 1.333 msaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4150 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4151 1.55 msaitoh }
4152 1.55 msaitoh
4153 1.98 msaitoh /* Now enable all the queues */
4154 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4155 1.333 msaitoh txr = &sc->tx_rings[i];
4156 1.98 msaitoh txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4157 1.98 msaitoh txdctl |= IXGBE_TXDCTL_ENABLE;
4158 1.98 msaitoh /* Set WTHRESH to 8, burst writeback */
4159 1.292 msaitoh txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT;
4160 1.98 msaitoh /*
4161 1.98 msaitoh * When the internal queue falls below PTHRESH (32),
4162 1.98 msaitoh * start prefetching as long as there are at least
4163 1.98 msaitoh * HTHRESH (1) buffers ready. The values are taken
4164 1.98 msaitoh * from the Intel linux driver 3.8.21.
4165 1.98 msaitoh * Prefetching enables tx line rate even with 1 queue.
4166 1.98 msaitoh */
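		/*
		 * As a rough guide to the TXDCTL layout: PTHRESH occupies
		 * bits [6:0] and HTHRESH bits [14:8] (WTHRESH, set above,
		 * lives in bits [22:16]), so the OR below encodes
		 * PTHRESH=32 and HTHRESH=1.
		 */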
4167 1.98 msaitoh txdctl |= (32 << 0) | (1 << 8);
4168 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4169 1.55 msaitoh }
4170 1.43 msaitoh
4171 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
4172 1.333 msaitoh rxr = &sc->rx_rings[i];
4173 1.98 msaitoh rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4174 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4175 1.98 msaitoh /*
4176 1.99 msaitoh * PTHRESH = 21
4177 1.99 msaitoh * HTHRESH = 4
4178 1.99 msaitoh * WTHRESH = 8
4179 1.99 msaitoh */
4180 1.98 msaitoh rxdctl &= ~0x3FFFFF;
4181 1.98 msaitoh rxdctl |= 0x080420;
4182 1.98 msaitoh }
4183 1.98 msaitoh rxdctl |= IXGBE_RXDCTL_ENABLE;
4184 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4185 1.144 msaitoh for (j = 0; j < 10; j++) {
4186 1.98 msaitoh if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4187 1.98 msaitoh IXGBE_RXDCTL_ENABLE)
4188 1.98 msaitoh break;
4189 1.98 msaitoh else
4190 1.98 msaitoh msec_delay(1);
4191 1.55 msaitoh }
4192 1.217 msaitoh IXGBE_WRITE_BARRIER(hw);
4193 1.99 msaitoh
4194 1.98 msaitoh /*
4195 1.98 msaitoh * In netmap mode, we must preserve the buffers made
4196 1.98 msaitoh * available to userspace before the if_init()
4197 1.98 msaitoh * (this is true by default on the TX side, because
4198 1.98 msaitoh * init makes all buffers available to userspace).
4199 1.98 msaitoh *
4200 1.98 msaitoh * netmap_reset() and the device specific routines
4201 1.98 msaitoh * (e.g. ixgbe_setup_receive_rings()) map these
4202 1.98 msaitoh * buffers at the end of the NIC ring, so here we
4203 1.98 msaitoh * must set the RDT (tail) register to make sure
4204 1.98 msaitoh * they are not overwritten.
4205 1.98 msaitoh *
4206 1.98 msaitoh * In this driver the NIC ring starts at RDH = 0,
4207 1.98 msaitoh * RDT points to the last slot available for reception (?),
4208 1.98 msaitoh * so RDT = num_rx_desc - 1 means the whole ring is available.
4209 1.98 msaitoh */
4210 1.99 msaitoh #ifdef DEV_NETMAP
4211 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
4212 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP)) {
4213 1.333 msaitoh struct netmap_adapter *na = NA(sc->ifp);
4214 1.189 msaitoh struct netmap_kring *kring = na->rx_rings[i];
4215 1.98 msaitoh int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4216 1.98 msaitoh
4217 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4218 1.98 msaitoh } else
4219 1.98 msaitoh #endif /* DEV_NETMAP */
4220 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4221 1.333 msaitoh sc->num_rx_desc - 1);
4222 1.48 msaitoh }
4223 1.98 msaitoh
4224 1.98 msaitoh /* Enable Receive engine */
4225 1.98 msaitoh rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4226 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4227 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_DMBYPS;
4228 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_RXEN;
4229 1.98 msaitoh ixgbe_enable_rx_dma(hw, rxctrl);
4230 1.98 msaitoh
4231 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4232 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4233 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4234 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4235 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4236 1.98 msaitoh
4237 1.144 msaitoh /* Set up MSI/MSI-X routing */
4238 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4239 1.333 msaitoh ixgbe_configure_ivars(sc);
4240 1.98 msaitoh /* Set up auto-mask */
4241 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4242 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4243 1.98 msaitoh else {
4244 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4245 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4246 1.55 msaitoh }
4247 1.98 msaitoh } else { /* Simple settings for Legacy/MSI */
4248 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 0);
4249 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 1);
4250 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4251 1.55 msaitoh }
4252 1.43 msaitoh
4253 1.333 msaitoh ixgbe_init_fdir(sc);
4254 1.98 msaitoh
4255 1.98 msaitoh /*
4256 1.98 msaitoh * Check on any SFP devices that
4257 1.98 msaitoh * need to be kick-started
4258 1.98 msaitoh */
4259 1.98 msaitoh if (hw->phy.type == ixgbe_phy_none) {
4260 1.283 msaitoh error = hw->phy.ops.identify(hw);
4261 1.283 msaitoh if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
4262 1.219 msaitoh unsupported_sfp = true;
4263 1.219 msaitoh } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4264 1.219 msaitoh unsupported_sfp = true;
4265 1.219 msaitoh
4266 1.219 msaitoh if (unsupported_sfp)
4267 1.219 msaitoh device_printf(dev,
4268 1.219 msaitoh "Unsupported SFP+ module type was detected.\n");
4269 1.98 msaitoh
4270 1.98 msaitoh /* Set moderation on the Link interrupt */
4271 1.333 msaitoh ixgbe_eitr_write(sc, sc->vector, IXGBE_LINK_ITR);
4272 1.98 msaitoh
4273 1.173 msaitoh /* Enable EEE power saving */
4274 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
4275 1.173 msaitoh hw->mac.ops.setup_eee(hw,
4276 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE);
4277 1.173 msaitoh
4278 1.144 msaitoh /* Enable power to the phy. */
4279 1.219 msaitoh if (!unsupported_sfp) {
4280 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE);
4281 1.144 msaitoh
4282 1.219 msaitoh /* Config/Enable Link */
4283 1.333 msaitoh ixgbe_config_link(sc);
4284 1.219 msaitoh }
4285 1.55 msaitoh
4286 1.98 msaitoh /* Hardware Packet Buffer & Flow Control setup */
4287 1.333 msaitoh ixgbe_config_delay_values(sc);
4288 1.1 dyoung
4289 1.98 msaitoh /* Initialize the FC settings */
4290 1.98 msaitoh ixgbe_start_hw(hw);
4291 1.1 dyoung
4292 1.98 msaitoh /* Set up VLAN support and filter */
4293 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
4294 1.1 dyoung
4295 1.98 msaitoh /* Setup DMA Coalescing */
4296 1.333 msaitoh ixgbe_config_dmac(sc);
4297 1.98 msaitoh
4298 1.230 msaitoh /* OK to schedule workqueues. */
4299 1.333 msaitoh sc->schedule_wqs_ok = true;
4300 1.230 msaitoh
4301 1.98 msaitoh /* Enable the use of the MBX by the VF's */
4302 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
4303 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4304 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4305 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4306 1.1 dyoung }
4307 1.98 msaitoh
4308 1.123 msaitoh /* Update saved flags. See ixgbe_ifflags_cb() */
4309 1.333 msaitoh sc->if_flags = ifp->if_flags;
4310 1.333 msaitoh sc->ec_capenable = sc->osdep.ec.ec_capenable;
4311 1.123 msaitoh
4312 1.337 msaitoh /* Inform the stack we're ready */
4313 1.98 msaitoh ifp->if_flags |= IFF_RUNNING;
4314 1.98 msaitoh
4315 1.337 msaitoh /* And now turn on interrupts */
4316 1.337 msaitoh ixgbe_enable_intr(sc);
4317 1.337 msaitoh
4318 1.1 dyoung return;
4319 1.99 msaitoh } /* ixgbe_init_locked */
4320 1.1 dyoung
4321 1.99 msaitoh /************************************************************************
4322 1.99 msaitoh * ixgbe_init
4323 1.99 msaitoh ************************************************************************/
4324 1.98 msaitoh static int
4325 1.98 msaitoh ixgbe_init(struct ifnet *ifp)
4326 1.98 msaitoh {
4327 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
4328 1.98 msaitoh
4329 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4330 1.333 msaitoh ixgbe_init_locked(sc);
4331 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4332 1.98 msaitoh
4333 1.98 msaitoh return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4334 1.99 msaitoh } /* ixgbe_init */
4335 1.43 msaitoh
4336 1.99 msaitoh /************************************************************************
4337 1.99 msaitoh * ixgbe_set_ivar
4338 1.99 msaitoh *
4339 1.99 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt
4340 1.99 msaitoh * (yes this is all very magic and confusing :)
4341 1.99 msaitoh * - entry is the register array entry
4342 1.99 msaitoh * - vector is the MSI-X vector for this queue
4343 1.99 msaitoh * - type is RX/TX/MISC
4344 1.99 msaitoh ************************************************************************/
4345 1.42 msaitoh static void
4346 1.333 msaitoh ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
4347 1.1 dyoung {
4348 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4349 1.98 msaitoh u32 ivar, index;
4350 1.98 msaitoh
4351 1.98 msaitoh vector |= IXGBE_IVAR_ALLOC_VAL;
4352 1.98 msaitoh
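	/*
	 * A rough sketch of the IVAR layout assumed below: each 32-bit
	 * IVAR register holds four 8-bit allocation entries.  On 82598
	 * the RX entries start at 0 and the TX entries at 64, hence the
	 * 'type * 64' offset.  On 82599 and newer each IVAR register
	 * covers two queues (RX in bytes 0/2, TX in bytes 1/3), and the
	 * "other causes" vector lives in the separate IVAR_MISC register.
	 */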
4353 1.98 msaitoh switch (hw->mac.type) {
4354 1.98 msaitoh case ixgbe_mac_82598EB:
4355 1.98 msaitoh if (type == -1)
4356 1.98 msaitoh entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4357 1.98 msaitoh else
4358 1.98 msaitoh entry += (type * 64);
4359 1.98 msaitoh index = (entry >> 2) & 0x1F;
4360 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4361 1.198 msaitoh ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4362 1.198 msaitoh ivar |= ((u32)vector << (8 * (entry & 0x3)));
4363 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
4364 1.98 msaitoh break;
4365 1.98 msaitoh case ixgbe_mac_82599EB:
4366 1.98 msaitoh case ixgbe_mac_X540:
4367 1.98 msaitoh case ixgbe_mac_X550:
4368 1.98 msaitoh case ixgbe_mac_X550EM_x:
4369 1.99 msaitoh case ixgbe_mac_X550EM_a:
4370 1.98 msaitoh if (type == -1) { /* MISC IVAR */
4371 1.98 msaitoh index = (entry & 1) * 8;
4372 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4373 1.194 msaitoh ivar &= ~(0xffUL << index);
4374 1.194 msaitoh ivar |= ((u32)vector << index);
4375 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4376 1.98 msaitoh } else { /* RX/TX IVARS */
4377 1.98 msaitoh index = (16 * (entry & 1)) + (8 * type);
4378 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4379 1.194 msaitoh ivar &= ~(0xffUL << index);
4380 1.194 msaitoh ivar |= ((u32)vector << index);
4381 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4382 1.98 msaitoh }
4383 1.135 msaitoh break;
4384 1.98 msaitoh default:
4385 1.98 msaitoh break;
4386 1.98 msaitoh }
4387 1.99 msaitoh } /* ixgbe_set_ivar */
4388 1.1 dyoung
4389 1.99 msaitoh /************************************************************************
4390 1.99 msaitoh * ixgbe_configure_ivars
4391 1.99 msaitoh ************************************************************************/
4392 1.98 msaitoh static void
4393 1.333 msaitoh ixgbe_configure_ivars(struct ixgbe_softc *sc)
4394 1.98 msaitoh {
4395 1.333 msaitoh struct ix_queue *que = sc->queues;
4396 1.186 msaitoh u32 newitr;
4397 1.1 dyoung
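	/*
	 * Sketch of the EITR arithmetic below: the ITR interval field
	 * occupies bits [11:3] and counts in units of roughly 2us on
	 * 82599 and newer, so a target rate of R interrupts/sec maps to
	 * approximately 4000000 / R, and the 0x0FF8 mask keeps the value
	 * within that field.
	 */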
4398 1.343 msaitoh if (sc->max_interrupt_rate > 0)
4399 1.343 msaitoh newitr = (4000000 / sc->max_interrupt_rate) & 0x0FF8;
4400 1.98 msaitoh else {
4401 1.48 msaitoh /*
4402 1.99 msaitoh * Disable DMA coalescing if interrupt moderation is
4403 1.99 msaitoh * disabled.
4404 1.99 msaitoh */
4405 1.333 msaitoh sc->dmac = 0;
4406 1.98 msaitoh newitr = 0;
4407 1.98 msaitoh }
4408 1.98 msaitoh
4409 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
4410 1.333 msaitoh struct rx_ring *rxr = &sc->rx_rings[i];
4411 1.333 msaitoh struct tx_ring *txr = &sc->tx_rings[i];
4412 1.98 msaitoh /* First the RX queue entry */
4413 1.333 msaitoh ixgbe_set_ivar(sc, rxr->me, que->msix, 0);
4414 1.98 msaitoh /* ... and the TX */
4415 1.333 msaitoh ixgbe_set_ivar(sc, txr->me, que->msix, 1);
4416 1.98 msaitoh /* Set an Initial EITR value */
4417 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, newitr);
4418 1.138 knakahar /*
4419 1.138 knakahar 		 * Clear eitr_setting to eliminate any influence of the previous state.
4420 1.138 knakahar * At this point, Tx/Rx interrupt handler
4421 1.138 knakahar * (ixgbe_msix_que()) cannot be called, so both
4422 1.138 knakahar * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4423 1.138 knakahar */
4424 1.138 knakahar que->eitr_setting = 0;
4425 1.98 msaitoh }
4426 1.98 msaitoh
4427 1.98 msaitoh /* For the Link interrupt */
4428 1.333 msaitoh ixgbe_set_ivar(sc, 1, sc->vector, -1);
4429 1.99 msaitoh } /* ixgbe_configure_ivars */
4430 1.98 msaitoh
4431 1.99 msaitoh /************************************************************************
4432 1.99 msaitoh * ixgbe_config_gpie
4433 1.99 msaitoh ************************************************************************/
4434 1.98 msaitoh static void
4435 1.333 msaitoh ixgbe_config_gpie(struct ixgbe_softc *sc)
4436 1.98 msaitoh {
4437 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4438 1.186 msaitoh u32 gpie;
4439 1.98 msaitoh
4440 1.98 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4441 1.98 msaitoh
4442 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) {
4443 1.99 msaitoh /* Enable Enhanced MSI-X mode */
4444 1.99 msaitoh gpie |= IXGBE_GPIE_MSIX_MODE
4445 1.186 msaitoh | IXGBE_GPIE_EIAME
4446 1.186 msaitoh | IXGBE_GPIE_PBA_SUPPORT
4447 1.186 msaitoh | IXGBE_GPIE_OCD;
4448 1.99 msaitoh }
4449 1.99 msaitoh
4450 1.98 msaitoh /* Fan Failure Interrupt */
4451 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4452 1.98 msaitoh gpie |= IXGBE_SDP1_GPIEN;
4453 1.1 dyoung
4454 1.99 msaitoh /* Thermal Sensor Interrupt */
4455 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4456 1.99 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4457 1.1 dyoung
4458 1.99 msaitoh /* Link detection */
4459 1.99 msaitoh switch (hw->mac.type) {
4460 1.99 msaitoh case ixgbe_mac_82599EB:
4461 1.99 msaitoh gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4462 1.99 msaitoh break;
4463 1.99 msaitoh case ixgbe_mac_X550EM_x:
4464 1.99 msaitoh case ixgbe_mac_X550EM_a:
4465 1.98 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540;
4466 1.99 msaitoh break;
4467 1.99 msaitoh default:
4468 1.99 msaitoh break;
4469 1.1 dyoung }
4470 1.1 dyoung
4471 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4472 1.98 msaitoh
4473 1.99 msaitoh } /* ixgbe_config_gpie */
4474 1.1 dyoung
4475 1.99 msaitoh /************************************************************************
4476 1.99 msaitoh * ixgbe_config_delay_values
4477 1.99 msaitoh *
4478 1.333 msaitoh * Requires sc->max_frame_size to be set.
4479 1.99 msaitoh ************************************************************************/
4480 1.33 msaitoh static void
4481 1.333 msaitoh ixgbe_config_delay_values(struct ixgbe_softc *sc)
4482 1.33 msaitoh {
4483 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4484 1.186 msaitoh u32 rxpb, frame, size, tmp;
4485 1.33 msaitoh
4486 1.333 msaitoh frame = sc->max_frame_size;
4487 1.33 msaitoh
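	/*
	 * The watermarks computed below are RX packet buffer fill levels
	 * in KB: the MAC sends XOFF pause frames once the buffer rises
	 * past high_water and XON once it drains below low_water.  The
	 * IXGBE_DV()/IXGBE_LOW_DV() macros estimate the worst-case delay
	 * in bytes for the given frame size.
	 */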
4488 1.98 msaitoh /* Calculate High Water */
4489 1.98 msaitoh switch (hw->mac.type) {
4490 1.98 msaitoh case ixgbe_mac_X540:
4491 1.44 msaitoh case ixgbe_mac_X550:
4492 1.44 msaitoh case ixgbe_mac_X550EM_x:
4493 1.99 msaitoh case ixgbe_mac_X550EM_a:
4494 1.98 msaitoh tmp = IXGBE_DV_X540(frame, frame);
4495 1.44 msaitoh break;
4496 1.44 msaitoh default:
4497 1.98 msaitoh tmp = IXGBE_DV(frame, frame);
4498 1.44 msaitoh break;
4499 1.44 msaitoh }
4500 1.98 msaitoh size = IXGBE_BT2KB(tmp);
4501 1.98 msaitoh rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4502 1.98 msaitoh hw->fc.high_water[0] = rxpb - size;
4503 1.44 msaitoh
4504 1.98 msaitoh /* Now calculate Low Water */
4505 1.98 msaitoh switch (hw->mac.type) {
4506 1.98 msaitoh case ixgbe_mac_X540:
4507 1.98 msaitoh case ixgbe_mac_X550:
4508 1.98 msaitoh case ixgbe_mac_X550EM_x:
4509 1.99 msaitoh case ixgbe_mac_X550EM_a:
4510 1.98 msaitoh tmp = IXGBE_LOW_DV_X540(frame);
4511 1.98 msaitoh break;
4512 1.98 msaitoh default:
4513 1.98 msaitoh tmp = IXGBE_LOW_DV(frame);
4514 1.98 msaitoh break;
4515 1.33 msaitoh }
4516 1.98 msaitoh hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4517 1.33 msaitoh
4518 1.98 msaitoh hw->fc.pause_time = IXGBE_FC_PAUSE;
4519 1.98 msaitoh hw->fc.send_xon = TRUE;
4520 1.99 msaitoh } /* ixgbe_config_delay_values */
4521 1.33 msaitoh
4522 1.99 msaitoh /************************************************************************
4523 1.213 msaitoh * ixgbe_set_rxfilter - Multicast Update
4524 1.1 dyoung *
4525 1.99 msaitoh * Called whenever multicast address list is updated.
4526 1.99 msaitoh ************************************************************************/
4527 1.1 dyoung static void
4528 1.333 msaitoh ixgbe_set_rxfilter(struct ixgbe_softc *sc)
4529 1.1 dyoung {
4530 1.99 msaitoh struct ixgbe_mc_addr *mta;
4531 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4532 1.98 msaitoh u8 *update_ptr;
4533 1.98 msaitoh int mcnt = 0;
4534 1.99 msaitoh u32 fctrl;
4535 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec;
4536 1.98 msaitoh struct ether_multi *enm;
4537 1.98 msaitoh struct ether_multistep step;
4538 1.98 msaitoh
4539 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4540 1.213 msaitoh IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4541 1.98 msaitoh
4542 1.333 msaitoh mta = sc->mta;
4543 1.98 msaitoh bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4544 1.1 dyoung
4545 1.105 msaitoh ETHER_LOCK(ec);
4546 1.183 ozaki ec->ec_flags &= ~ETHER_F_ALLMULTI;
4547 1.98 msaitoh ETHER_FIRST_MULTI(step, ec, enm);
4548 1.98 msaitoh while (enm != NULL) {
4549 1.98 msaitoh if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4550 1.98 msaitoh (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4551 1.98 msaitoh ETHER_ADDR_LEN) != 0)) {
4552 1.183 ozaki ec->ec_flags |= ETHER_F_ALLMULTI;
4553 1.98 msaitoh break;
4554 1.98 msaitoh }
4555 1.98 msaitoh bcopy(enm->enm_addrlo,
4556 1.98 msaitoh mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4557 1.333 msaitoh mta[mcnt].vmdq = sc->pool;
4558 1.98 msaitoh mcnt++;
4559 1.98 msaitoh ETHER_NEXT_MULTI(step, enm);
4560 1.98 msaitoh }
4561 1.1 dyoung
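	/*
	 * FCTRL's UPE/MPE bits are the unicast/multicast promiscuous
	 * enables: promiscuous mode sets both, ALLMULTI opens only the
	 * multicast filter, and otherwise both stay under normal filter
	 * control.
	 */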
4562 1.333 msaitoh fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4563 1.98 msaitoh if (ifp->if_flags & IFF_PROMISC)
4564 1.98 msaitoh fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4565 1.183 ozaki else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4566 1.98 msaitoh fctrl |= IXGBE_FCTRL_MPE;
4567 1.212 msaitoh fctrl &= ~IXGBE_FCTRL_UPE;
4568 1.212 msaitoh } else
4569 1.212 msaitoh fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4570 1.1 dyoung
4571 1.211 msaitoh /* Update multicast filter entries only when it's not ALLMULTI */
4572 1.211 msaitoh if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4573 1.211 msaitoh ETHER_UNLOCK(ec);
4574 1.98 msaitoh update_ptr = (u8 *)mta;
4575 1.333 msaitoh ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4576 1.99 msaitoh ixgbe_mc_array_itr, TRUE);
4577 1.211 msaitoh } else
4578 1.211 msaitoh ETHER_UNLOCK(ec);
4579 1.332 msaitoh
4580 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4581 1.213 msaitoh } /* ixgbe_set_rxfilter */
4582 1.1 dyoung
4583 1.99 msaitoh /************************************************************************
4584 1.99 msaitoh * ixgbe_mc_array_itr
4585 1.99 msaitoh *
4586 1.99 msaitoh * An iterator function needed by the multicast shared code.
4587 1.99 msaitoh * It feeds the shared code routine the addresses in the
4588 1.213 msaitoh * array of ixgbe_set_rxfilter() one by one.
4589 1.99 msaitoh ************************************************************************/
4590 1.98 msaitoh static u8 *
4591 1.98 msaitoh ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4592 1.98 msaitoh {
4593 1.98 msaitoh struct ixgbe_mc_addr *mta;
4594 1.1 dyoung
4595 1.98 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr;
4596 1.98 msaitoh *vmdq = mta->vmdq;
4597 1.33 msaitoh
4598 1.98 msaitoh *update_ptr = (u8*)(mta + 1);
4599 1.99 msaitoh
4600 1.98 msaitoh return (mta->addr);
4601 1.99 msaitoh } /* ixgbe_mc_array_itr */
4602 1.82 msaitoh
4603 1.99 msaitoh /************************************************************************
4604 1.99 msaitoh * ixgbe_local_timer - Timer routine
4605 1.98 msaitoh *
4606 1.99 msaitoh * Checks for link status, updates statistics,
4607 1.99 msaitoh * and runs the watchdog check.
4608 1.99 msaitoh ************************************************************************/
4609 1.98 msaitoh static void
4610 1.98 msaitoh ixgbe_local_timer(void *arg)
4611 1.98 msaitoh {
4612 1.333 msaitoh struct ixgbe_softc *sc = arg;
4613 1.1 dyoung
4614 1.333 msaitoh if (sc->schedule_wqs_ok) {
4615 1.333 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0)
4616 1.333 msaitoh workqueue_enqueue(sc->timer_wq,
4617 1.333 msaitoh &sc->timer_wc, NULL);
4618 1.233 msaitoh }
4619 1.98 msaitoh }
4620 1.28 msaitoh
4621 1.98 msaitoh static void
4622 1.233 msaitoh ixgbe_handle_timer(struct work *wk, void *context)
4623 1.98 msaitoh {
4624 1.339 msaitoh struct ixgbe_softc *sc = context;
4625 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4626 1.333 msaitoh device_t dev = sc->dev;
4627 1.333 msaitoh struct ix_queue *que = sc->queues;
4628 1.153 msaitoh u64 queues = 0;
4629 1.134 msaitoh u64 v0, v1, v2, v3, v4, v5, v6, v7;
4630 1.153 msaitoh int hung = 0;
4631 1.134 msaitoh int i;
4632 1.1 dyoung
4633 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4634 1.1 dyoung
4635 1.98 msaitoh /* Check for pluggable optics */
4636 1.237 msaitoh if (ixgbe_is_sfp(hw)) {
4637 1.249 msaitoh bool sched_mod_task = false;
4638 1.237 msaitoh
4639 1.249 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) {
4640 1.249 msaitoh /*
4641 1.249 msaitoh * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4642 1.250 msaitoh * any GPIO(SDP). So just schedule TASK_MOD.
4643 1.249 msaitoh */
4644 1.249 msaitoh sched_mod_task = true;
4645 1.249 msaitoh } else {
4646 1.249 msaitoh bool was_full, is_full;
4647 1.249 msaitoh
4648 1.249 msaitoh was_full =
4649 1.249 msaitoh hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4650 1.251 msaitoh is_full = ixgbe_sfp_cage_full(hw);
4651 1.249 msaitoh
4652 1.249 msaitoh /* Do probe if cage state changed */
4653 1.249 msaitoh if (was_full ^ is_full)
4654 1.249 msaitoh sched_mod_task = true;
4655 1.249 msaitoh }
4656 1.249 msaitoh if (sched_mod_task) {
4657 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4658 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
4659 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc);
4660 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4661 1.239 msaitoh }
4662 1.237 msaitoh }
4663 1.1 dyoung
4664 1.333 msaitoh ixgbe_update_link_status(sc);
4665 1.333 msaitoh ixgbe_update_stats_counters(sc);
4666 1.33 msaitoh
4667 1.134 msaitoh /* Update some event counters */
4668 1.134 msaitoh v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4669 1.333 msaitoh que = sc->queues;
4670 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4671 1.186 msaitoh struct tx_ring *txr = que->txr;
4672 1.134 msaitoh
4673 1.134 msaitoh v0 += txr->q_efbig_tx_dma_setup;
4674 1.134 msaitoh v1 += txr->q_mbuf_defrag_failed;
4675 1.134 msaitoh v2 += txr->q_efbig2_tx_dma_setup;
4676 1.134 msaitoh v3 += txr->q_einval_tx_dma_setup;
4677 1.134 msaitoh v4 += txr->q_other_tx_dma_setup;
4678 1.134 msaitoh v5 += txr->q_eagain_tx_dma_setup;
4679 1.134 msaitoh v6 += txr->q_enomem_tx_dma_setup;
4680 1.134 msaitoh v7 += txr->q_tso_err;
4681 1.134 msaitoh }
4682 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0);
4683 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1);
4684 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2);
4685 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3);
4686 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4);
4687 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5);
4688 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6);
4689 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, v7);
4690 1.134 msaitoh
4691 1.153 msaitoh /*
4692 1.153 msaitoh * Check the TX queues status
4693 1.186 msaitoh * - mark hung queues so we don't schedule on them
4694 1.186 msaitoh * - watchdog only if all queues show hung
4695 1.153 msaitoh */
4696 1.333 msaitoh que = sc->queues;
4697 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4698 1.153 msaitoh /* Keep track of queues with work for soft irq */
4699 1.153 msaitoh if (que->txr->busy)
4700 1.190 msaitoh queues |= 1ULL << que->me;
4701 1.153 msaitoh /*
4702 1.153 msaitoh * Each time txeof runs without cleaning, but there
4703 1.153 msaitoh * are uncleaned descriptors it increments busy. If
4704 1.153 msaitoh * we get to the MAX we declare it hung.
4705 1.153 msaitoh */
4706 1.153 msaitoh if (que->busy == IXGBE_QUEUE_HUNG) {
4707 1.153 msaitoh ++hung;
4708 1.153 msaitoh /* Mark the queue as inactive */
4709 1.333 msaitoh sc->active_queues &= ~(1ULL << que->me);
4710 1.153 msaitoh continue;
4711 1.153 msaitoh } else {
4712 1.153 msaitoh /* Check if we've come back from hung */
4713 1.333 msaitoh if ((sc->active_queues & (1ULL << que->me)) == 0)
4714 1.333 msaitoh sc->active_queues |= 1ULL << que->me;
4715 1.153 msaitoh }
4716 1.153 msaitoh if (que->busy >= IXGBE_MAX_TX_BUSY) {
4717 1.153 msaitoh device_printf(dev,
4718 1.153 msaitoh "Warning queue %d appears to be hung!\n", i);
4719 1.153 msaitoh que->txr->busy = IXGBE_QUEUE_HUNG;
4720 1.153 msaitoh ++hung;
4721 1.153 msaitoh }
4722 1.150 msaitoh }
4723 1.150 msaitoh
4724 1.232 msaitoh /* Only truly watchdog if all queues show hung */
4725 1.333 msaitoh if (hung == sc->num_queues)
4726 1.153 msaitoh goto watchdog;
4727 1.160 msaitoh #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4728 1.153 msaitoh else if (queues != 0) { /* Force an IRQ on queues with work */
4729 1.333 msaitoh que = sc->queues;
4730 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) {
4731 1.139 knakahar mutex_enter(&que->dc_mtx);
4732 1.153 msaitoh if (que->disabled_count == 0)
4733 1.333 msaitoh ixgbe_rearm_queues(sc,
4734 1.153 msaitoh queues & ((u64)1 << i));
4735 1.139 knakahar mutex_exit(&que->dc_mtx);
4736 1.131 knakahar }
4737 1.98 msaitoh }
4738 1.160 msaitoh #endif
4739 1.150 msaitoh
4740 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
4741 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4742 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc);
4743 1.153 msaitoh return;
4744 1.1 dyoung
4745 1.153 msaitoh watchdog:
4746 1.333 msaitoh device_printf(sc->dev, "Watchdog timeout -- resetting\n");
4747 1.333 msaitoh sc->ifp->if_flags &= ~IFF_RUNNING;
4748 1.333 msaitoh IXGBE_EVC_ADD(&sc->watchdog_events, 1);
4749 1.333 msaitoh ixgbe_init_locked(sc);
4750 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4751 1.233 msaitoh } /* ixgbe_handle_timer */
4752 1.43 msaitoh
4753 1.99 msaitoh /************************************************************************
4754 1.169 msaitoh * ixgbe_recovery_mode_timer - Recovery mode timer routine
4755 1.169 msaitoh ************************************************************************/
4756 1.169 msaitoh static void
4757 1.169 msaitoh ixgbe_recovery_mode_timer(void *arg)
4758 1.169 msaitoh {
4759 1.333 msaitoh struct ixgbe_softc *sc = arg;
4760 1.233 msaitoh
4761 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) {
4762 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode_timer_pending,
4763 1.254 msaitoh 0, 1) == 0) {
4764 1.333 msaitoh workqueue_enqueue(sc->recovery_mode_timer_wq,
4765 1.333 msaitoh &sc->recovery_mode_timer_wc, NULL);
4766 1.254 msaitoh }
4767 1.233 msaitoh }
4768 1.233 msaitoh }
4769 1.233 msaitoh
4770 1.233 msaitoh static void
4771 1.233 msaitoh ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4772 1.233 msaitoh {
4773 1.333 msaitoh struct ixgbe_softc *sc = context;
4774 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4775 1.169 msaitoh
4776 1.333 msaitoh IXGBE_CORE_LOCK(sc);
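	/*
	 * The atomic_cas below makes sure the warning is printed and the
	 * adapter is stopped only on the transition into recovery mode,
	 * not on every 1-second poll of this timer.
	 */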
4777 1.169 msaitoh if (ixgbe_fw_recovery_mode(hw)) {
4778 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode, 0, 1) == 0) {
4779 1.169 msaitoh /* Firmware error detected, entering recovery mode */
4780 1.333 msaitoh device_printf(sc->dev,
4781 1.319 msaitoh "Firmware recovery mode detected. Limiting "
4782 1.319 msaitoh "functionality. Refer to the Intel(R) Ethernet "
4783 1.319 msaitoh "Adapters and Devices User Guide for details on "
4784 1.319 msaitoh "firmware recovery mode.\n");
4785 1.169 msaitoh
4786 1.169 msaitoh if (hw->adapter_stopped == FALSE)
4787 1.333 msaitoh ixgbe_stop_locked(sc);
4788 1.169 msaitoh }
4789 1.169 msaitoh } else
4790 1.333 msaitoh atomic_cas_uint(&sc->recovery_mode, 1, 0);
4791 1.169 msaitoh
4792 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0);
4793 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz,
4794 1.333 msaitoh ixgbe_recovery_mode_timer, sc);
4795 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4796 1.233 msaitoh } /* ixgbe_handle_recovery_mode_timer */
4797 1.169 msaitoh
4798 1.169 msaitoh /************************************************************************
4799 1.99 msaitoh * ixgbe_handle_mod - Tasklet for SFP module interrupts
4800 1.273 msaitoh * bool int_en: true if it's called when the interrupt is enabled.
4801 1.99 msaitoh ************************************************************************/
4802 1.1 dyoung static void
4803 1.273 msaitoh ixgbe_handle_mod(void *context, bool int_en)
4804 1.1 dyoung {
4805 1.339 msaitoh struct ixgbe_softc *sc = context;
4806 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4807 1.333 msaitoh device_t dev = sc->dev;
4808 1.249 msaitoh enum ixgbe_sfp_type last_sfp_type;
4809 1.251 msaitoh u32 err;
4810 1.249 msaitoh bool last_unsupported_sfp_recovery;
4811 1.98 msaitoh
4812 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4813 1.257 msaitoh
4814 1.249 msaitoh last_sfp_type = hw->phy.sfp_type;
4815 1.249 msaitoh last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4816 1.333 msaitoh IXGBE_EVC_ADD(&sc->mod_workev, 1);
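	/*
	 * Crosstalk workaround: on MACs other than 82598, bail out when
	 * the SFP+ cage is not actually full.
	 */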
4817 1.333 msaitoh if (sc->hw.need_crosstalk_fix) {
4818 1.251 msaitoh if ((hw->mac.type != ixgbe_mac_82598EB) &&
4819 1.251 msaitoh !ixgbe_sfp_cage_full(hw))
4820 1.218 msaitoh goto out;
4821 1.98 msaitoh }
4822 1.98 msaitoh
4823 1.98 msaitoh err = hw->phy.ops.identify_sfp(hw);
4824 1.98 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4825 1.249 msaitoh if (last_unsupported_sfp_recovery == false)
4826 1.249 msaitoh device_printf(dev,
4827 1.249 msaitoh "Unsupported SFP+ module type was detected.\n");
4828 1.218 msaitoh goto out;
4829 1.33 msaitoh }
4830 1.33 msaitoh
4831 1.219 msaitoh if (hw->need_unsupported_sfp_recovery) {
4832 1.219 msaitoh device_printf(dev, "Recovering from unsupported SFP\n");
4833 1.219 msaitoh /*
4834 1.219 msaitoh * We could recover the status by calling setup_sfp(),
4835 1.219 msaitoh 		 * setup_link() and some others. It's complex and might not
4836 1.219 msaitoh 		 * work correctly in some unknown cases. To avoid that kind of
4837 1.219 msaitoh 		 * problem, call ixgbe_init_locked(). It's a simple and safe
4838 1.219 msaitoh 		 * approach.
4839 1.219 msaitoh */
4840 1.333 msaitoh ixgbe_init_locked(sc);
4841 1.249 msaitoh } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4842 1.249 msaitoh (hw->phy.sfp_type != last_sfp_type)) {
4843 1.249 msaitoh /* A module is inserted and changed. */
4844 1.249 msaitoh
4845 1.219 msaitoh if (hw->mac.type == ixgbe_mac_82598EB)
4846 1.219 msaitoh err = hw->phy.ops.reset(hw);
4847 1.219 msaitoh else {
4848 1.219 msaitoh err = hw->mac.ops.setup_sfp(hw);
4849 1.219 msaitoh hw->phy.sfp_setup_needed = FALSE;
4850 1.219 msaitoh }
4851 1.219 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4852 1.219 msaitoh device_printf(dev,
4853 1.219 msaitoh "Setup failure - unsupported SFP+ module type.\n");
4854 1.219 msaitoh goto out;
4855 1.219 msaitoh }
4856 1.1 dyoung }
4857 1.233 msaitoh
4858 1.218 msaitoh out:
4859 1.233 msaitoh /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4860 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
4861 1.233 msaitoh
4862 1.233 msaitoh /* Adjust media types shown in ifconfig */
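	/*
	 * ifmedia_removeall() and ixgbe_add_media_types() take the ifmedia
	 * lock, which is the CORE_LOCK, so drop it around those calls (see
	 * the comment in ixgbe_handle_admin()).
	 */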
4863 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4864 1.333 msaitoh ifmedia_removeall(&sc->media);
4865 1.333 msaitoh ixgbe_add_media_types(sc);
4866 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
4867 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4868 1.233 msaitoh
4869 1.249 msaitoh /*
4870 1.288 andvar 	 * Don't schedule an MSF event if the chip is 82598; 82598 doesn't
4871 1.249 msaitoh 	 * support MSF. In fact, calling ixgbe_handle_msf on 82598 DA makes
4872 1.250 msaitoh 	 * the link flap because the function calls setup_link().
4873 1.249 msaitoh */
4874 1.260 knakahar if (hw->mac.type != ixgbe_mac_82598EB) {
4875 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4876 1.273 msaitoh if (int_en)
4877 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
4878 1.273 msaitoh else
4879 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
4880 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4881 1.260 knakahar }
4882 1.249 msaitoh
4883 1.233 msaitoh /*
4884 1.233 msaitoh * Don't call ixgbe_schedule_admin_tasklet() because we are on
4885 1.233 msaitoh * the workqueue now.
4886 1.233 msaitoh */
4887 1.99 msaitoh } /* ixgbe_handle_mod */
4888 1.1 dyoung
4889 1.1 dyoung
4890 1.99 msaitoh /************************************************************************
4891 1.99 msaitoh * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4892 1.99 msaitoh ************************************************************************/
4893 1.33 msaitoh static void
4894 1.233 msaitoh ixgbe_handle_msf(void *context)
4895 1.33 msaitoh {
4896 1.339 msaitoh struct ixgbe_softc *sc = context;
4897 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4898 1.186 msaitoh u32 autoneg;
4899 1.186 msaitoh bool negotiate;
4900 1.33 msaitoh
4901 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4902 1.257 msaitoh
4903 1.333 msaitoh IXGBE_EVC_ADD(&sc->msf_workev, 1);
4904 1.33 msaitoh
4905 1.98 msaitoh autoneg = hw->phy.autoneg_advertised;
4906 1.98 msaitoh if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4907 1.98 msaitoh hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4908 1.98 msaitoh if (hw->mac.ops.setup_link)
4909 1.98 msaitoh hw->mac.ops.setup_link(hw, autoneg, TRUE);
4910 1.99 msaitoh } /* ixgbe_handle_msf */
4911 1.33 msaitoh
4912 1.99 msaitoh /************************************************************************
4913 1.99 msaitoh * ixgbe_handle_phy - Tasklet for external PHY interrupts
4914 1.99 msaitoh ************************************************************************/
4915 1.1 dyoung static void
4916 1.98 msaitoh ixgbe_handle_phy(void *context)
4917 1.1 dyoung {
4918 1.339 msaitoh struct ixgbe_softc *sc = context;
4919 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4920 1.98 msaitoh int error;
4921 1.1 dyoung
4922 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
4923 1.257 msaitoh
4924 1.333 msaitoh IXGBE_EVC_ADD(&sc->phy_workev, 1);
4925 1.98 msaitoh error = hw->phy.ops.handle_lasi(hw);
4926 1.98 msaitoh if (error == IXGBE_ERR_OVERTEMP)
4927 1.333 msaitoh device_printf(sc->dev,
4928 1.98 msaitoh "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4929 1.98 msaitoh " PHY will downshift to lower power state!\n");
4930 1.98 msaitoh else if (error)
4931 1.333 msaitoh device_printf(sc->dev,
4932 1.99 msaitoh "Error handling LASI interrupt: %d\n", error);
4933 1.99 msaitoh } /* ixgbe_handle_phy */
4934 1.1 dyoung
4935 1.98 msaitoh static void
4936 1.233 msaitoh ixgbe_handle_admin(struct work *wk, void *context)
4937 1.233 msaitoh {
4938 1.339 msaitoh struct ixgbe_softc *sc = context;
4939 1.333 msaitoh struct ifnet *ifp = sc->ifp;
4940 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
4941 1.260 knakahar u32 task_requests;
4942 1.273 msaitoh u32 eims_enable = 0;
4943 1.260 knakahar
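	/*
	 * Take a snapshot of the pending requests and clear them under
	 * admin_mtx; requests posted after this point will enqueue a new
	 * admin work item.
	 */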
4944 1.333 msaitoh mutex_enter(&sc->admin_mtx);
4945 1.333 msaitoh sc->admin_pending = 0;
4946 1.333 msaitoh task_requests = sc->task_requests;
4947 1.333 msaitoh sc->task_requests = 0;
4948 1.333 msaitoh mutex_exit(&sc->admin_mtx);
4949 1.233 msaitoh
4950 1.233 msaitoh /*
4951 1.233 msaitoh * Hold the IFNET_LOCK across this entire call. This will
4952 1.333 msaitoh * prevent additional changes to sc->phy_layer
4953 1.233 msaitoh * and serialize calls to this tasklet. We cannot hold the
4954 1.233 msaitoh 	 * CORE_LOCK while calling into the ifmedia functions, as
4955 1.233 msaitoh 	 * they call ifmedia_lock() and that lock is the CORE_LOCK.
4956 1.233 msaitoh */
4957 1.233 msaitoh IFNET_LOCK(ifp);
4958 1.333 msaitoh IXGBE_CORE_LOCK(sc);
4959 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
4960 1.333 msaitoh ixgbe_handle_link(sc);
4961 1.273 msaitoh eims_enable |= IXGBE_EIMS_LSC;
4962 1.273 msaitoh }
4963 1.319 msaitoh if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0)
4964 1.333 msaitoh ixgbe_handle_mod(sc, false);
4965 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
4966 1.333 msaitoh ixgbe_handle_mod(sc, true);
4967 1.273 msaitoh if (hw->mac.type >= ixgbe_mac_X540)
4968 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4969 1.273 msaitoh else
4970 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4971 1.260 knakahar }
4972 1.273 msaitoh if ((task_requests
4973 1.273 msaitoh & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
4974 1.333 msaitoh ixgbe_handle_msf(sc);
4975 1.273 msaitoh if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
4976 1.273 msaitoh (hw->mac.type == ixgbe_mac_82599EB))
4977 1.273 msaitoh eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
4978 1.260 knakahar }
4979 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
4980 1.333 msaitoh ixgbe_handle_phy(sc);
4981 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
4982 1.260 knakahar }
4983 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
4984 1.333 msaitoh ixgbe_reinit_fdir(sc);
4985 1.273 msaitoh eims_enable |= IXGBE_EIMS_FLOW_DIR;
4986 1.260 knakahar }
4987 1.233 msaitoh #if 0 /* notyet */
4988 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
4989 1.333 msaitoh ixgbe_handle_mbx(sc);
4990 1.273 msaitoh eims_enable |= IXGBE_EIMS_MAILBOX;
4991 1.260 knakahar }
4992 1.233 msaitoh #endif
4993 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);
4994 1.233 msaitoh
4995 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
4996 1.233 msaitoh IFNET_UNLOCK(ifp);
4997 1.233 msaitoh } /* ixgbe_handle_admin */
4998 1.233 msaitoh
4999 1.233 msaitoh static void
5000 1.98 msaitoh ixgbe_ifstop(struct ifnet *ifp, int disable)
5001 1.98 msaitoh {
5002 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
5003 1.1 dyoung
5004 1.333 msaitoh IXGBE_CORE_LOCK(sc);
5005 1.333 msaitoh ixgbe_stop_locked(sc);
5006 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
5007 1.223 thorpej
5008 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc);
5009 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0);
5010 1.98 msaitoh }
5011 1.1 dyoung
5012 1.99 msaitoh /************************************************************************
5013 1.252 msaitoh * ixgbe_stop_locked - Stop the hardware
5014 1.98 msaitoh *
5015 1.99 msaitoh * Disables all traffic on the adapter by issuing a
5016 1.99 msaitoh * global reset on the MAC and deallocates TX/RX buffers.
5017 1.99 msaitoh ************************************************************************/
5018 1.1 dyoung static void
5019 1.252 msaitoh ixgbe_stop_locked(void *arg)
5020 1.1 dyoung {
5021 1.186 msaitoh struct ifnet *ifp;
5022 1.339 msaitoh struct ixgbe_softc *sc = arg;
5023 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5024 1.99 msaitoh
5025 1.333 msaitoh ifp = sc->ifp;
5026 1.98 msaitoh
5027 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5028 1.98 msaitoh
5029 1.252 msaitoh INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
5030 1.333 msaitoh ixgbe_disable_intr(sc);
5031 1.333 msaitoh callout_stop(&sc->timer);
5032 1.98 msaitoh
5033 1.223 thorpej /* Don't schedule workqueues. */
5034 1.333 msaitoh sc->schedule_wqs_ok = false;
5035 1.223 thorpej
5036 1.98 msaitoh /* Let the stack know...*/
5037 1.98 msaitoh ifp->if_flags &= ~IFF_RUNNING;
5038 1.98 msaitoh
5039 1.98 msaitoh ixgbe_reset_hw(hw);
5040 1.98 msaitoh hw->adapter_stopped = FALSE;
5041 1.98 msaitoh ixgbe_stop_adapter(hw);
5042 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82599EB)
5043 1.98 msaitoh ixgbe_stop_mac_link_on_d3_82599(hw);
5044 1.98 msaitoh /* Turn off the laser - noop with no optics */
5045 1.98 msaitoh ixgbe_disable_tx_laser(hw);
5046 1.1 dyoung
5047 1.98 msaitoh /* Update the stack */
5048 1.333 msaitoh sc->link_up = FALSE;
5049 1.333 msaitoh ixgbe_update_link_status(sc);
5050 1.1 dyoung
5051 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */
5052 1.333 msaitoh ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
5053 1.1 dyoung
5054 1.98 msaitoh return;
5055 1.252 msaitoh } /* ixgbe_stop_locked */
5056 1.1 dyoung
5057 1.99 msaitoh /************************************************************************
5058 1.99 msaitoh * ixgbe_update_link_status - Update OS on link state
5059 1.99 msaitoh *
5060 1.99 msaitoh * Note: Only updates the OS on the cached link state.
5061 1.186 msaitoh * The real check of the hardware only happens with
5062 1.186 msaitoh * a link interrupt.
5063 1.99 msaitoh ************************************************************************/
5064 1.98 msaitoh static void
5065 1.333 msaitoh ixgbe_update_link_status(struct ixgbe_softc *sc)
5066 1.1 dyoung {
5067 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5068 1.333 msaitoh device_t dev = sc->dev;
5069 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5070 1.98 msaitoh
5071 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
5072 1.136 knakahar
5073 1.333 msaitoh if (sc->link_up) {
5074 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) {
5075 1.138 knakahar /*
5076 1.138 knakahar * To eliminate influence of the previous state
5077 1.138 knakahar * in the same way as ixgbe_init_locked().
5078 1.138 knakahar */
5079 1.333 msaitoh struct ix_queue *que = sc->queues;
5080 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5081 1.138 knakahar que->eitr_setting = 0;
5082 1.138 knakahar
5083 1.344 msaitoh if (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
5084 1.98 msaitoh /*
5085 1.98 msaitoh * Discard count for both MAC Local Fault and
5086 1.98 msaitoh * Remote Fault because those registers are
5087 1.98 msaitoh 				 * valid only when the link is up and the
5088 1.98 msaitoh 				 * speed is 10Gbps.
5089 1.98 msaitoh */
5090 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MLFC);
5091 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MRFC);
5092 1.98 msaitoh }
5093 1.98 msaitoh
5094 1.98 msaitoh if (bootverbose) {
5095 1.98 msaitoh const char *bpsmsg;
5096 1.1 dyoung
5097 1.333 msaitoh switch (sc->link_speed) {
5098 1.98 msaitoh case IXGBE_LINK_SPEED_10GB_FULL:
5099 1.98 msaitoh bpsmsg = "10 Gbps";
5100 1.98 msaitoh break;
5101 1.98 msaitoh case IXGBE_LINK_SPEED_5GB_FULL:
5102 1.98 msaitoh bpsmsg = "5 Gbps";
5103 1.98 msaitoh break;
5104 1.98 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL:
5105 1.98 msaitoh bpsmsg = "2.5 Gbps";
5106 1.98 msaitoh break;
5107 1.98 msaitoh case IXGBE_LINK_SPEED_1GB_FULL:
5108 1.98 msaitoh bpsmsg = "1 Gbps";
5109 1.98 msaitoh break;
5110 1.98 msaitoh case IXGBE_LINK_SPEED_100_FULL:
5111 1.98 msaitoh bpsmsg = "100 Mbps";
5112 1.98 msaitoh break;
5113 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL:
5114 1.99 msaitoh bpsmsg = "10 Mbps";
5115 1.99 msaitoh break;
5116 1.98 msaitoh default:
5117 1.98 msaitoh bpsmsg = "unknown speed";
5118 1.98 msaitoh break;
5119 1.98 msaitoh }
5120 1.98 msaitoh device_printf(dev, "Link is up %s %s \n",
5121 1.98 msaitoh bpsmsg, "Full Duplex");
5122 1.98 msaitoh }
5123 1.333 msaitoh sc->link_active = LINK_STATE_UP;
5124 1.98 msaitoh /* Update any Flow Control changes */
5125 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5126 1.98 msaitoh /* Update DMA coalescing config */
5127 1.333 msaitoh ixgbe_config_dmac(sc);
5128 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_UP);
5129 1.144 msaitoh
5130 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5131 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5132 1.98 msaitoh }
5133 1.174 msaitoh } else {
5134 1.174 msaitoh /*
5135 1.174 msaitoh * Do it when link active changes to DOWN. i.e.
5136 1.174 msaitoh * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5137 1.186 msaitoh * b) LINK_STATE_UP -> LINK_STATE_DOWN
5138 1.174 msaitoh */
5139 1.333 msaitoh if (sc->link_active != LINK_STATE_DOWN) {
5140 1.98 msaitoh if (bootverbose)
5141 1.98 msaitoh device_printf(dev, "Link is Down\n");
5142 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN);
5143 1.333 msaitoh sc->link_active = LINK_STATE_DOWN;
5144 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5145 1.333 msaitoh ixgbe_ping_all_vfs(sc);
5146 1.333 msaitoh ixgbe_drain_all(sc);
5147 1.98 msaitoh }
5148 1.1 dyoung }
5149 1.99 msaitoh } /* ixgbe_update_link_status */
5150 1.1 dyoung
5151 1.99 msaitoh /************************************************************************
5152 1.99 msaitoh * ixgbe_config_dmac - Configure DMA Coalescing
5153 1.99 msaitoh ************************************************************************/
5154 1.1 dyoung static void
5155 1.333 msaitoh ixgbe_config_dmac(struct ixgbe_softc *sc)
5156 1.1 dyoung {
5157 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5158 1.98 msaitoh struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5159 1.1 dyoung
5160 1.99 msaitoh if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5161 1.98 msaitoh return;
5162 1.65 msaitoh
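	/*
	 * The XORs below act as "changed" tests: reconfigure only when the
	 * watchdog timer or the link speed differs from the cached config.
	 */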
5163 1.333 msaitoh if (dcfg->watchdog_timer ^ sc->dmac ||
5164 1.333 msaitoh dcfg->link_speed ^ sc->link_speed) {
5165 1.333 msaitoh dcfg->watchdog_timer = sc->dmac;
5166 1.98 msaitoh dcfg->fcoe_en = false;
5167 1.333 msaitoh dcfg->link_speed = sc->link_speed;
5168 1.98 msaitoh dcfg->num_tcs = 1;
5169 1.51 msaitoh
5170 1.98 msaitoh INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5171 1.98 msaitoh dcfg->watchdog_timer, dcfg->link_speed);
5172 1.51 msaitoh
5173 1.98 msaitoh hw->mac.ops.dmac_config(hw);
5174 1.98 msaitoh }
5175 1.99 msaitoh } /* ixgbe_config_dmac */
5176 1.51 msaitoh
5177 1.99 msaitoh /************************************************************************
5178 1.99 msaitoh * ixgbe_enable_intr
5179 1.99 msaitoh ************************************************************************/
5180 1.98 msaitoh static void
5181 1.333 msaitoh ixgbe_enable_intr(struct ixgbe_softc *sc)
5182 1.98 msaitoh {
5183 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5184 1.333 msaitoh struct ix_queue *que = sc->queues;
5185 1.98 msaitoh u32 mask, fwsm;
5186 1.51 msaitoh
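	/*
	 * Start from all general causes except the RX/TX queue bits;
	 * the queue vectors are enabled individually below.
	 */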
5187 1.98 msaitoh mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5188 1.45 msaitoh
5189 1.333 msaitoh switch (sc->hw.mac.type) {
5190 1.99 msaitoh case ixgbe_mac_82599EB:
5191 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5192 1.99 msaitoh /* Temperature sensor on some adapters */
5193 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0;
5194 1.99 msaitoh /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5195 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5196 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP2;
5197 1.99 msaitoh break;
5198 1.99 msaitoh case ixgbe_mac_X540:
5199 1.99 msaitoh /* Detect if Thermal Sensor is enabled */
5200 1.99 msaitoh fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5201 1.99 msaitoh if (fwsm & IXGBE_FWSM_TS_ENABLED)
5202 1.98 msaitoh mask |= IXGBE_EIMS_TS;
5203 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5204 1.99 msaitoh break;
5205 1.99 msaitoh case ixgbe_mac_X550:
5206 1.99 msaitoh /* MAC thermal sensor is automatically enabled */
5207 1.99 msaitoh mask |= IXGBE_EIMS_TS;
5208 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5209 1.99 msaitoh break;
5210 1.99 msaitoh case ixgbe_mac_X550EM_x:
5211 1.99 msaitoh case ixgbe_mac_X550EM_a:
5212 1.99 msaitoh /* Some devices use SDP0 for important information */
5213 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5214 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5215 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5216 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5217 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5218 1.99 msaitoh if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5219 1.99 msaitoh mask |= IXGBE_EICR_GPI_SDP0_X540;
5220 1.99 msaitoh mask |= IXGBE_EIMS_ECC;
5221 1.99 msaitoh break;
5222 1.99 msaitoh default:
5223 1.99 msaitoh break;
5224 1.1 dyoung }
5225 1.51 msaitoh
5226 1.99 msaitoh /* Enable Fan Failure detection */
5227 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
5228 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1;
5229 1.99 msaitoh /* Enable SR-IOV */
5230 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV)
5231 1.99 msaitoh mask |= IXGBE_EIMS_MAILBOX;
5232 1.99 msaitoh /* Enable Flow Director */
5233 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FDIR)
5234 1.99 msaitoh mask |= IXGBE_EIMS_FLOW_DIR;
5235 1.99 msaitoh
5236 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5237 1.64 msaitoh
5238 1.98 msaitoh /* With MSI-X we use auto clear */
5239 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) {
5240 1.270 msaitoh /*
5241 1.309 msaitoh * We use auto clear for RTX_QUEUE only. Don't use other
5242 1.309 msaitoh 		 * interrupts (e.g. the link interrupt). Note that we don't
5243 1.309 msaitoh 		 * use the TCP_TIMER interrupt itself.
5244 1.270 msaitoh */
5245 1.270 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
5246 1.98 msaitoh }
5247 1.1 dyoung
5248 1.98 msaitoh /*
5249 1.99 msaitoh * Now enable all queues, this is done separately to
5250 1.99 msaitoh * allow for handling the extended (beyond 32) MSI-X
5251 1.99 msaitoh * vectors that can be used by 82599
5252 1.99 msaitoh */
5253 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5254 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
5255 1.1 dyoung
5256 1.98 msaitoh IXGBE_WRITE_FLUSH(hw);
5257 1.43 msaitoh
5258 1.99 msaitoh } /* ixgbe_enable_intr */
5259 1.1 dyoung
5260 1.99 msaitoh /************************************************************************
5261 1.139 knakahar * ixgbe_disable_intr_internal
5262 1.99 msaitoh ************************************************************************/
5263 1.44 msaitoh static void
5264 1.333 msaitoh ixgbe_disable_intr_internal(struct ixgbe_softc *sc, bool nestok)
5265 1.44 msaitoh {
5266 1.333 msaitoh struct ix_queue *que = sc->queues;
5267 1.127 knakahar
5268 1.127 knakahar /* disable interrupts other than queues */
5269 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5270 1.127 knakahar
5271 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0)
5272 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
5273 1.127 knakahar
5274 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++)
5275 1.333 msaitoh ixgbe_disable_queue_internal(sc, que->msix, nestok);
5276 1.127 knakahar
5277 1.333 msaitoh IXGBE_WRITE_FLUSH(&sc->hw);
5278 1.99 msaitoh
5279 1.139 knakahar } /* ixgbe_disable_intr_internal */
5280 1.139 knakahar
5281 1.139 knakahar /************************************************************************
5282 1.139 knakahar * ixgbe_disable_intr
5283 1.139 knakahar ************************************************************************/
5284 1.139 knakahar static void
5285 1.333 msaitoh ixgbe_disable_intr(struct ixgbe_softc *sc)
5286 1.139 knakahar {
5287 1.139 knakahar
5288 1.333 msaitoh ixgbe_disable_intr_internal(sc, true);
5289 1.99 msaitoh } /* ixgbe_disable_intr */
5290 1.98 msaitoh
5291 1.99 msaitoh /************************************************************************
5292 1.139 knakahar * ixgbe_ensure_disabled_intr
5293 1.139 knakahar ************************************************************************/
5294 1.139 knakahar void
5295 1.333 msaitoh ixgbe_ensure_disabled_intr(struct ixgbe_softc *sc)
5296 1.139 knakahar {
5297 1.139 knakahar
5298 1.333 msaitoh ixgbe_disable_intr_internal(sc, false);
5299 1.139 knakahar } /* ixgbe_ensure_disabled_intr */
5300 1.139 knakahar
5301 1.139 knakahar /************************************************************************
5302 1.99 msaitoh * ixgbe_legacy_irq - Legacy Interrupt Service routine
5303 1.99 msaitoh ************************************************************************/
5304 1.98 msaitoh static int
5305 1.98 msaitoh ixgbe_legacy_irq(void *arg)
5306 1.1 dyoung {
5307 1.98 msaitoh struct ix_queue *que = arg;
5308 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
5309 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5310 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5311 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
5312 1.277 msaitoh u32 eicr;
5313 1.269 msaitoh u32 eims_orig;
5314 1.273 msaitoh u32 eims_enable = 0;
5315 1.273 msaitoh u32 eims_disable = 0;
5316 1.98 msaitoh
5317 1.269 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
5318 1.269 msaitoh /*
5319 1.269 msaitoh * Silicon errata #26 on 82598. Disable all interrupts before reading
5320 1.269 msaitoh * EICR.
5321 1.269 msaitoh */
5322 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5323 1.98 msaitoh
5324 1.268 msaitoh /* Read and clear EICR */
5325 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5326 1.44 msaitoh
5327 1.99 msaitoh if (eicr == 0) {
5328 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.intzero, 1);
5329 1.269 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
5330 1.98 msaitoh return 0;
5331 1.98 msaitoh }
5332 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.legint, 1);
5333 1.44 msaitoh
5334 1.272 msaitoh /* Queue (0) intr */
5335 1.308 msaitoh if (((ifp->if_flags & IFF_RUNNING) != 0) &&
5336 1.308 msaitoh (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
5337 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1);
5338 1.272 msaitoh
5339 1.147 knakahar /*
5340 1.265 msaitoh 		 * Same as in ixgbe_msix_que(): see the comment there about
5341 1.265 msaitoh 		 * "que->txrx_use_workqueue".
5342 1.147 knakahar */
5343 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue;
5344 1.147 knakahar
5345 1.98 msaitoh IXGBE_TX_LOCK(txr);
5346 1.98 msaitoh ixgbe_txeof(txr);
5347 1.99 msaitoh #ifdef notyet
5348 1.99 msaitoh if (!ixgbe_ring_empty(ifp, txr->br))
5349 1.99 msaitoh ixgbe_start_locked(ifp, txr);
5350 1.99 msaitoh #endif
5351 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
5352 1.271 msaitoh
5353 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
5354 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
5355 1.273 msaitoh /* Disable queue 0 interrupt */
5356 1.273 msaitoh eims_disable |= 1UL << 0;
5357 1.273 msaitoh } else
5358 1.317 msaitoh eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE;
5359 1.44 msaitoh
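	/*
	 * Hand the remaining (admin) causes to the common handler; it may
	 * add bits to eims_disable so they stay masked until the admin
	 * task has run.
	 */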
5360 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable);
5361 1.233 msaitoh
5362 1.273 msaitoh /* Re-enable some interrupts */
5363 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS,
5364 1.273 msaitoh (eims_orig & ~eims_disable) | eims_enable);
5365 1.99 msaitoh
5366 1.98 msaitoh return 1;
5367 1.99 msaitoh } /* ixgbe_legacy_irq */
5368 1.98 msaitoh
5369 1.99 msaitoh /************************************************************************
5370 1.119 msaitoh * ixgbe_free_pciintr_resources
5371 1.99 msaitoh ************************************************************************/
5372 1.98 msaitoh static void
5373 1.333 msaitoh ixgbe_free_pciintr_resources(struct ixgbe_softc *sc)
5374 1.44 msaitoh {
5375 1.333 msaitoh struct ix_queue *que = sc->queues;
5376 1.98 msaitoh int rid;
5377 1.44 msaitoh
5378 1.98 msaitoh /*
5379 1.99 msaitoh * Release all msix queue resources:
5380 1.99 msaitoh */
5381 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) {
5382 1.119 msaitoh if (que->res != NULL) {
5383 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[i]);
5384 1.333 msaitoh sc->osdep.ihs[i] = NULL;
5385 1.119 msaitoh }
5386 1.58 msaitoh }
5387 1.58 msaitoh
5388 1.98 msaitoh /* Clean the Legacy or Link interrupt last */
5389 1.333 msaitoh if (sc->vector) /* we are doing MSIX */
5390 1.333 msaitoh rid = sc->vector;
5391 1.98 msaitoh else
5392 1.98 msaitoh rid = 0;
5393 1.44 msaitoh
5394 1.333 msaitoh if (sc->osdep.ihs[rid] != NULL) {
5395 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[rid]);
5396 1.333 msaitoh sc->osdep.ihs[rid] = NULL;
5397 1.98 msaitoh }
5398 1.44 msaitoh
5399 1.333 msaitoh if (sc->osdep.intrs != NULL) {
5400 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs,
5401 1.333 msaitoh sc->osdep.nintrs);
5402 1.333 msaitoh sc->osdep.intrs = NULL;
5403 1.119 msaitoh }
5404 1.119 msaitoh } /* ixgbe_free_pciintr_resources */
5405 1.119 msaitoh
5406 1.119 msaitoh /************************************************************************
5407 1.119 msaitoh * ixgbe_free_pci_resources
5408 1.119 msaitoh ************************************************************************/
5409 1.119 msaitoh static void
5410 1.333 msaitoh ixgbe_free_pci_resources(struct ixgbe_softc *sc)
5411 1.119 msaitoh {
5412 1.119 msaitoh
5413 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
5414 1.44 msaitoh
5415 1.333 msaitoh if (sc->osdep.mem_size != 0) {
5416 1.333 msaitoh bus_space_unmap(sc->osdep.mem_bus_space_tag,
5417 1.333 msaitoh sc->osdep.mem_bus_space_handle,
5418 1.333 msaitoh sc->osdep.mem_size);
5419 1.44 msaitoh }
5420 1.99 msaitoh } /* ixgbe_free_pci_resources */
5421 1.44 msaitoh
5422 1.99 msaitoh /************************************************************************
5423 1.99 msaitoh * ixgbe_sysctl_flowcntl
5424 1.99 msaitoh *
5425 1.99 msaitoh * SYSCTL wrapper around setting Flow Control
5426 1.99 msaitoh ************************************************************************/
5427 1.98 msaitoh static int
5428 1.98 msaitoh ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5429 1.98 msaitoh {
5430 1.98 msaitoh struct sysctlnode node = *rnode;
5431 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5432 1.99 msaitoh int error, fc;
5433 1.82 msaitoh
5434 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5435 1.169 msaitoh return (EPERM);
5436 1.169 msaitoh
5437 1.333 msaitoh fc = sc->hw.fc.current_mode;
5438 1.98 msaitoh node.sysctl_data = &fc;
5439 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5440 1.98 msaitoh if (error != 0 || newp == NULL)
5441 1.98 msaitoh return error;
5442 1.82 msaitoh
5443 1.98 msaitoh /* Don't bother if it's not changed */
5444 1.333 msaitoh if (fc == sc->hw.fc.current_mode)
5445 1.98 msaitoh return (0);
5446 1.83 msaitoh
5447 1.333 msaitoh return ixgbe_set_flowcntl(sc, fc);
5448 1.99 msaitoh } /* ixgbe_sysctl_flowcntl */
5449 1.1 dyoung
5450 1.99 msaitoh /************************************************************************
5451 1.99 msaitoh * ixgbe_set_flowcntl - Set flow control
5452 1.99 msaitoh *
5453 1.99 msaitoh * Flow control values:
5454 1.99 msaitoh * 0 - off
5455 1.99 msaitoh * 1 - rx pause
5456 1.99 msaitoh * 2 - tx pause
5457 1.99 msaitoh * 3 - full
5458 1.99 msaitoh ************************************************************************/
5459 1.98 msaitoh static int
5460 1.333 msaitoh ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
5461 1.98 msaitoh {
5462 1.98 msaitoh switch (fc) {
5463 1.98 msaitoh case ixgbe_fc_rx_pause:
5464 1.98 msaitoh case ixgbe_fc_tx_pause:
5465 1.98 msaitoh case ixgbe_fc_full:
5466 1.333 msaitoh sc->hw.fc.requested_mode = fc;
5467 1.333 msaitoh if (sc->num_queues > 1)
5468 1.333 msaitoh ixgbe_disable_rx_drop(sc);
5469 1.98 msaitoh break;
5470 1.98 msaitoh case ixgbe_fc_none:
5471 1.333 msaitoh sc->hw.fc.requested_mode = ixgbe_fc_none;
5472 1.333 msaitoh if (sc->num_queues > 1)
5473 1.333 msaitoh ixgbe_enable_rx_drop(sc);
5474 1.98 msaitoh break;
5475 1.98 msaitoh default:
5476 1.98 msaitoh return (EINVAL);
5477 1.1 dyoung }
5478 1.99 msaitoh
5479 1.98 msaitoh #if 0 /* XXX NetBSD */
5480 1.98 msaitoh /* Don't autoneg if forcing a value */
5481 1.333 msaitoh sc->hw.fc.disable_fc_autoneg = TRUE;
5482 1.98 msaitoh #endif
5483 1.333 msaitoh ixgbe_fc_enable(&sc->hw);
5484 1.99 msaitoh
5485 1.98 msaitoh return (0);
5486 1.99 msaitoh } /* ixgbe_set_flowcntl */
5487 1.1 dyoung
5488 1.99 msaitoh /************************************************************************
5489 1.99 msaitoh * ixgbe_enable_rx_drop
5490 1.99 msaitoh *
5491 1.99 msaitoh * Enable the hardware to drop packets when the buffer is
5492 1.99 msaitoh * full. This is useful with multiqueue, so that no single
5493 1.99 msaitoh * queue being full stalls the entire RX engine. We only
5494 1.99 msaitoh * enable this when Multiqueue is enabled AND Flow Control
5495 1.99 msaitoh * is disabled.
5496 1.99 msaitoh ************************************************************************/
5497 1.98 msaitoh static void
5498 1.333 msaitoh ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5499 1.98 msaitoh {
5500 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5501 1.186 msaitoh struct rx_ring *rxr;
5502 1.186 msaitoh u32 srrctl;
5503 1.1 dyoung
5504 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5505 1.333 msaitoh rxr = &sc->rx_rings[i];
5506 1.99 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5507 1.99 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN;
5508 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5509 1.98 msaitoh }
5510 1.99 msaitoh
5511 1.98 msaitoh /* enable drop for each vf */
5512 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5513 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5514 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5515 1.98 msaitoh IXGBE_QDE_ENABLE));
5516 1.98 msaitoh }
5517 1.99 msaitoh } /* ixgbe_enable_rx_drop */
5518 1.43 msaitoh
5519 1.99 msaitoh /************************************************************************
5520 1.99 msaitoh * ixgbe_disable_rx_drop
5521 1.99 msaitoh ************************************************************************/
5522 1.98 msaitoh static void
5523 1.333 msaitoh ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5524 1.98 msaitoh {
5525 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5526 1.186 msaitoh struct rx_ring *rxr;
5527 1.186 msaitoh u32 srrctl;
5528 1.43 msaitoh
5529 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) {
5530 1.333 msaitoh rxr = &sc->rx_rings[i];
5531 1.186 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5532 1.186 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5533 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5534 1.98 msaitoh }
5535 1.99 msaitoh
5536 1.98 msaitoh /* disable drop for each vf */
5537 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) {
5538 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE,
5539 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5540 1.1 dyoung }
5541 1.99 msaitoh } /* ixgbe_disable_rx_drop */
5542 1.98 msaitoh
5543 1.99 msaitoh /************************************************************************
5544 1.99 msaitoh * ixgbe_sysctl_advertise
5545 1.99 msaitoh *
5546 1.99 msaitoh * SYSCTL wrapper around setting advertised speed
5547 1.99 msaitoh ************************************************************************/
5548 1.98 msaitoh static int
5549 1.98 msaitoh ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5550 1.98 msaitoh {
5551 1.99 msaitoh struct sysctlnode node = *rnode;
5552 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5553 1.186 msaitoh int error = 0, advertise;
5554 1.1 dyoung
5555 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5556 1.169 msaitoh return (EPERM);
5557 1.169 msaitoh
5558 1.333 msaitoh advertise = sc->advertise;
5559 1.98 msaitoh node.sysctl_data = &advertise;
5560 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5561 1.98 msaitoh if (error != 0 || newp == NULL)
5562 1.98 msaitoh return error;
5563 1.28 msaitoh
5564 1.333 msaitoh return ixgbe_set_advertise(sc, advertise);
5565 1.99 msaitoh } /* ixgbe_sysctl_advertise */
5566 1.1 dyoung
5567 1.99 msaitoh /************************************************************************
5568 1.99 msaitoh * ixgbe_set_advertise - Control advertised link speed
5569 1.99 msaitoh *
5570 1.99 msaitoh * Flags:
5571 1.103 msaitoh * 0x00 - Default (all capable link speed)
5572 1.296 msaitoh * 0x1 - advertise 100 Mb
5573 1.296 msaitoh * 0x2 - advertise 1G
5574 1.296 msaitoh * 0x4 - advertise 10G
5575 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5576 1.103 msaitoh * 0x10 - advertise 2.5G
5577 1.103 msaitoh * 0x20 - advertise 5G
5578 1.99 msaitoh ************************************************************************/
5579 1.98 msaitoh static int
5580 1.333 msaitoh ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5581 1.1 dyoung {
5582 1.186 msaitoh device_t dev;
5583 1.186 msaitoh struct ixgbe_hw *hw;
5584 1.99 msaitoh ixgbe_link_speed speed = 0;
5585 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5586 1.186 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED;
5587 1.186 msaitoh bool negotiate = FALSE;
5588 1.98 msaitoh
5589 1.98 msaitoh /* Checks to validate new value */
5590 1.333 msaitoh if (sc->advertise == advertise) /* no change */
5591 1.98 msaitoh return (0);
5592 1.98 msaitoh
5593 1.333 msaitoh dev = sc->dev;
5594 1.333 msaitoh hw = &sc->hw;
5595 1.98 msaitoh
5596 1.98 msaitoh /* No speed changes for backplane media */
5597 1.98 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane)
5598 1.98 msaitoh return (ENODEV);
5599 1.98 msaitoh
5600 1.98 msaitoh if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5601 1.98 msaitoh (hw->phy.multispeed_fiber))) {
5602 1.98 msaitoh device_printf(dev,
5603 1.98 msaitoh "Advertised speed can only be set on copper or "
5604 1.98 msaitoh "multispeed fiber media types.\n");
5605 1.98 msaitoh return (EINVAL);
5606 1.98 msaitoh }
5607 1.98 msaitoh
5608 1.259 msaitoh if (advertise < 0x0 || advertise > 0x3f) {
5609 1.319 msaitoh device_printf(dev, "Invalid advertised speed; "
5610 1.319 msaitoh "valid modes are 0x0 through 0x3f\n");
5611 1.98 msaitoh return (EINVAL);
5612 1.98 msaitoh }
5613 1.1 dyoung
5614 1.99 msaitoh if (hw->mac.ops.get_link_capabilities) {
5615 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5616 1.99 msaitoh &negotiate);
5617 1.99 msaitoh if (err != IXGBE_SUCCESS) {
5618 1.319 msaitoh device_printf(dev, "Unable to determine supported "
5619 1.319 msaitoh "advertise speeds\n");
5620 1.99 msaitoh return (ENODEV);
5621 1.99 msaitoh }
5622 1.99 msaitoh }
5623 1.99 msaitoh
5624 1.98 msaitoh /* Set new value and report new advertised mode */
5625 1.99 msaitoh if (advertise & 0x1) {
5626 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5627 1.319 msaitoh device_printf(dev, "Interface does not support 100Mb "
5628 1.319 msaitoh "advertised speed\n");
5629 1.98 msaitoh return (EINVAL);
5630 1.98 msaitoh }
5631 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL;
5632 1.99 msaitoh }
5633 1.99 msaitoh if (advertise & 0x2) {
5634 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5635 1.319 msaitoh device_printf(dev, "Interface does not support 1Gb "
5636 1.319 msaitoh "advertised speed\n");
5637 1.99 msaitoh return (EINVAL);
5638 1.99 msaitoh }
5639 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL;
5640 1.99 msaitoh }
5641 1.99 msaitoh if (advertise & 0x4) {
5642 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5643 1.319 msaitoh device_printf(dev, "Interface does not support 10Gb "
5644 1.319 msaitoh "advertised speed\n");
5645 1.99 msaitoh return (EINVAL);
5646 1.99 msaitoh }
5647 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL;
5648 1.99 msaitoh }
5649 1.99 msaitoh if (advertise & 0x8) {
5650 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5651 1.319 msaitoh device_printf(dev, "Interface does not support 10Mb "
5652 1.319 msaitoh "advertised speed\n");
5653 1.99 msaitoh return (EINVAL);
5654 1.99 msaitoh }
5655 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL;
5656 1.99 msaitoh }
5657 1.103 msaitoh if (advertise & 0x10) {
5658 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5659 1.319 msaitoh device_printf(dev, "Interface does not support 2.5Gb "
5660 1.319 msaitoh "advertised speed\n");
5661 1.103 msaitoh return (EINVAL);
5662 1.103 msaitoh }
5663 1.103 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5664 1.103 msaitoh }
5665 1.103 msaitoh if (advertise & 0x20) {
5666 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5667 1.319 msaitoh device_printf(dev, "Interface does not support 5Gb "
5668 1.319 msaitoh "advertised speed\n");
5669 1.103 msaitoh return (EINVAL);
5670 1.103 msaitoh }
5671 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL;
5672 1.103 msaitoh }
5673 1.99 msaitoh if (advertise == 0)
5674 1.99 msaitoh speed = link_caps; /* All capable link speed */
5675 1.1 dyoung
5676 1.98 msaitoh hw->mac.autotry_restart = TRUE;
5677 1.98 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE);
5678 1.333 msaitoh sc->advertise = advertise;
5679 1.1 dyoung
5680 1.99 msaitoh return (0);
5681 1.99 msaitoh } /* ixgbe_set_advertise */
5682 1.1 dyoung
5683 1.99 msaitoh /************************************************************************
5684 1.296 msaitoh * ixgbe_get_default_advertise - Get default advertised speed settings
5685 1.99 msaitoh *
5686 1.99 msaitoh * Formatted for sysctl usage.
5687 1.99 msaitoh * Flags:
5688 1.296 msaitoh * 0x1 - advertise 100 Mb
5689 1.296 msaitoh * 0x2 - advertise 1G
5690 1.296 msaitoh * 0x4 - advertise 10G
5691 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb)
5692 1.103 msaitoh * 0x10 - advertise 2.5G
5693 1.103 msaitoh * 0x20 - advertise 5G
5694 1.99 msaitoh ************************************************************************/
5695 1.98 msaitoh static int
5696 1.333 msaitoh ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5697 1.1 dyoung {
5698 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5699 1.186 msaitoh int speed;
5700 1.99 msaitoh ixgbe_link_speed link_caps = 0;
5701 1.186 msaitoh s32 err;
5702 1.186 msaitoh bool negotiate = FALSE;
5703 1.98 msaitoh
5704 1.99 msaitoh /*
5705 1.99 msaitoh * Advertised speed means nothing unless it's copper or
5706 1.99 msaitoh * multi-speed fiber
5707 1.99 msaitoh */
5708 1.99 msaitoh if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5709 1.99 msaitoh !(hw->phy.multispeed_fiber))
5710 1.99 msaitoh return (0);
5711 1.1 dyoung
5712 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5713 1.99 msaitoh if (err != IXGBE_SUCCESS)
5714 1.99 msaitoh return (0);
5715 1.1 dyoung
5716 1.99 msaitoh speed =
5717 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5718 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5719 1.103 msaitoh ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5720 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5721 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5722 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5723 1.99 msaitoh
5724 1.99 msaitoh return speed;
5725 1.296 msaitoh } /* ixgbe_get_default_advertise */
5726 1.99 msaitoh
5727 1.99 msaitoh /************************************************************************
5728 1.99 msaitoh * ixgbe_sysctl_dmac - Manage DMA Coalescing
5729 1.99 msaitoh *
5730 1.99 msaitoh * Control values:
5731 1.99 msaitoh * 0/1 - off / on (use default value of 1000)
5732 1.99 msaitoh *
5733 1.99 msaitoh * Legal timer values are:
5734 1.99 msaitoh * 50,100,250,500,1000,2000,5000,10000
5735 1.99 msaitoh *
5736 1.99 msaitoh * Turning off interrupt moderation will also turn this off.
5737 1.99 msaitoh ************************************************************************/
5738 1.1 dyoung static int
5739 1.98 msaitoh ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5740 1.1 dyoung {
5741 1.44 msaitoh struct sysctlnode node = *rnode;
5742 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5743 1.333 msaitoh struct ifnet *ifp = sc->ifp;
5744 1.186 msaitoh int error;
5745 1.186 msaitoh int newval;
5746 1.1 dyoung
5747 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5748 1.169 msaitoh return (EPERM);
5749 1.169 msaitoh
5750 1.333 msaitoh newval = sc->dmac;
5751 1.98 msaitoh node.sysctl_data = &newval;
5752 1.22 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5753 1.98 msaitoh if ((error) || (newp == NULL))
5754 1.98 msaitoh return (error);
5755 1.98 msaitoh
5756 1.98 msaitoh switch (newval) {
5757 1.98 msaitoh case 0:
5758 1.98 msaitoh /* Disabled */
5759 1.333 msaitoh sc->dmac = 0;
5760 1.98 msaitoh break;
5761 1.98 msaitoh case 1:
5762 1.98 msaitoh /* Enable and use default */
5763 1.333 msaitoh sc->dmac = 1000;
5764 1.98 msaitoh break;
5765 1.98 msaitoh case 50:
5766 1.98 msaitoh case 100:
5767 1.98 msaitoh case 250:
5768 1.98 msaitoh case 500:
5769 1.98 msaitoh case 1000:
5770 1.98 msaitoh case 2000:
5771 1.98 msaitoh case 5000:
5772 1.98 msaitoh case 10000:
5773 1.98 msaitoh /* Legal values - allow */
5774 1.333 msaitoh sc->dmac = newval;
5775 1.98 msaitoh break;
5776 1.98 msaitoh default:
5777 1.98 msaitoh /* Do nothing, illegal value */
5778 1.98 msaitoh return (EINVAL);
5779 1.22 msaitoh }
5780 1.1 dyoung
5781 1.98 msaitoh /* Re-initialize hardware if it's already running */
5782 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING)
5783 1.302 riastrad if_init(ifp);
5784 1.1 dyoung
5785 1.98 msaitoh return (0);
5786 1.1 dyoung }
5787 1.1 dyoung
5788 1.98 msaitoh #ifdef IXGBE_DEBUG
5789 1.99 msaitoh /************************************************************************
5790 1.99 msaitoh * ixgbe_sysctl_power_state
5791 1.99 msaitoh *
5792 1.99 msaitoh * Sysctl to test power states
5793 1.99 msaitoh * Values:
5794 1.99 msaitoh * 0 - set device to D0
5795 1.99 msaitoh * 3 - set device to D3
5796 1.99 msaitoh * (none) - get current device power state
5797 1.99 msaitoh ************************************************************************/
5798 1.98 msaitoh static int
5799 1.98 msaitoh ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5800 1.44 msaitoh {
5801 1.99 msaitoh #ifdef notyet
5802 1.98 msaitoh struct sysctlnode node = *rnode;
5803 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5804 1.333 msaitoh device_t dev = sc->dev;
5805 1.186 msaitoh int curr_ps, new_ps, error = 0;
5806 1.44 msaitoh
5807 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5808 1.169 msaitoh return (EPERM);
5809 1.169 msaitoh
5810 1.98 msaitoh curr_ps = new_ps = pci_get_powerstate(dev);
5811 1.44 msaitoh
5812 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5813 1.98 msaitoh if ((error) || (req->newp == NULL))
5814 1.98 msaitoh return (error);
5815 1.44 msaitoh
5816 1.98 msaitoh if (new_ps == curr_ps)
5817 1.98 msaitoh return (0);
5818 1.44 msaitoh
5819 1.98 msaitoh if (new_ps == 3 && curr_ps == 0)
5820 1.98 msaitoh error = DEVICE_SUSPEND(dev);
5821 1.98 msaitoh else if (new_ps == 0 && curr_ps == 3)
5822 1.98 msaitoh error = DEVICE_RESUME(dev);
5823 1.98 msaitoh else
5824 1.98 msaitoh return (EINVAL);
5825 1.44 msaitoh
5826 1.98 msaitoh device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5827 1.44 msaitoh
5828 1.98 msaitoh return (error);
5829 1.98 msaitoh #else
5830 1.98 msaitoh return 0;
5831 1.98 msaitoh #endif
5832 1.99 msaitoh } /* ixgbe_sysctl_power_state */
5833 1.98 msaitoh #endif
5834 1.99 msaitoh
5835 1.99 msaitoh /************************************************************************
5836 1.99 msaitoh * ixgbe_sysctl_wol_enable
5837 1.99 msaitoh *
5838 1.99 msaitoh * Sysctl to enable/disable the WoL capability,
5839 1.99 msaitoh * if supported by the adapter.
5840 1.99 msaitoh *
5841 1.99 msaitoh * Values:
5842 1.99 msaitoh * 0 - disabled
5843 1.99 msaitoh * 1 - enabled
5844 1.99 msaitoh ************************************************************************/
5845 1.98 msaitoh static int
5846 1.98 msaitoh ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5847 1.98 msaitoh {
5848 1.98 msaitoh struct sysctlnode node = *rnode;
5849 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5850 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5851 1.186 msaitoh bool new_wol_enabled;
5852 1.186 msaitoh int error = 0;
5853 1.44 msaitoh
5854 1.169 msaitoh /*
5855 1.169 msaitoh * It's not required to check recovery mode because this function never
5856 1.169 msaitoh * touches hardware.
5857 1.169 msaitoh */
5858 1.98 msaitoh new_wol_enabled = hw->wol_enabled;
5859 1.98 msaitoh node.sysctl_data = &new_wol_enabled;
5860 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5861 1.98 msaitoh if ((error) || (newp == NULL))
5862 1.98 msaitoh return (error);
5863 1.98 msaitoh if (new_wol_enabled == hw->wol_enabled)
5864 1.98 msaitoh return (0);
5865 1.44 msaitoh
5866 1.333 msaitoh if (new_wol_enabled && !sc->wol_support)
5867 1.98 msaitoh return (ENODEV);
5868 1.98 msaitoh else
5869 1.98 msaitoh hw->wol_enabled = new_wol_enabled;
5870 1.44 msaitoh
5871 1.98 msaitoh return (0);
5872 1.99 msaitoh } /* ixgbe_sysctl_wol_enable */
5873 1.48 msaitoh
5874 1.99 msaitoh /************************************************************************
5875 1.99 msaitoh * ixgbe_sysctl_wufc - Wake Up Filter Control
5876 1.99 msaitoh *
5877 1.99 msaitoh * Sysctl to enable/disable the types of packets that the
5878 1.99 msaitoh * adapter will wake up on upon receipt.
5879 1.99 msaitoh * Flags:
5880 1.99 msaitoh * 0x1 - Link Status Change
5881 1.99 msaitoh * 0x2 - Magic Packet
5882 1.99 msaitoh * 0x4 - Direct Exact
5883 1.99 msaitoh * 0x8 - Directed Multicast
5884 1.99 msaitoh * 0x10 - Broadcast
5885 1.99 msaitoh * 0x20 - ARP/IPv4 Request Packet
5886 1.99 msaitoh * 0x40 - Direct IPv4 Packet
5887 1.99 msaitoh * 0x80 - Direct IPv6 Packet
5888 1.98 msaitoh *
5889 1.99 msaitoh * Settings not listed above will cause the sysctl to return an error.
5890 1.99 msaitoh ************************************************************************/
5891 1.1 dyoung static int
5892 1.98 msaitoh ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5893 1.1 dyoung {
5894 1.98 msaitoh struct sysctlnode node = *rnode;
5895 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5896 1.98 msaitoh int error = 0;
5897 1.98 msaitoh u32 new_wufc;
5898 1.52 msaitoh
5899 1.169 msaitoh /*
5900 1.169 msaitoh * It's not required to check recovery mode because this function never
5901 1.169 msaitoh * touches hardware.
5902 1.169 msaitoh */
5903 1.333 msaitoh new_wufc = sc->wufc;
5904 1.98 msaitoh node.sysctl_data = &new_wufc;
5905 1.52 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
5906 1.98 msaitoh if ((error) || (newp == NULL))
5907 1.98 msaitoh return (error);
5908 1.333 msaitoh if (new_wufc == sc->wufc)
5909 1.98 msaitoh return (0);
5910 1.98 msaitoh
5911 1.98 msaitoh if (new_wufc & 0xffffff00)
5912 1.98 msaitoh return (EINVAL);
5913 1.99 msaitoh
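	/*
	 * Only the low 8 bits are settable from this sysctl; they are
	 * merged (OR'ed) with the current wufc value.
	 */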
5914 1.99 msaitoh new_wufc &= 0xff;
5915 1.333 msaitoh new_wufc |= (0xffffff & sc->wufc);
5916 1.333 msaitoh sc->wufc = new_wufc;
5917 1.52 msaitoh
5918 1.98 msaitoh return (0);
5919 1.99 msaitoh } /* ixgbe_sysctl_wufc */
5920 1.52 msaitoh
5921 1.98 msaitoh #ifdef IXGBE_DEBUG
5922 1.99 msaitoh /************************************************************************
5923 1.99 msaitoh * ixgbe_sysctl_print_rss_config
5924 1.99 msaitoh ************************************************************************/
5925 1.52 msaitoh static int
5926 1.98 msaitoh ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5927 1.52 msaitoh {
5928 1.99 msaitoh #ifdef notyet
5929 1.99 msaitoh struct sysctlnode node = *rnode;
5930 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5931 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5932 1.333 msaitoh device_t dev = sc->dev;
5933 1.186 msaitoh struct sbuf *buf;
5934 1.186 msaitoh int error = 0, reta_size;
5935 1.186 msaitoh u32 reg;
5936 1.1 dyoung
5937 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5938 1.169 msaitoh return (EPERM);
5939 1.169 msaitoh
5940 1.98 msaitoh buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5941 1.98 msaitoh if (!buf) {
5942 1.98 msaitoh device_printf(dev, "Could not allocate sbuf for output.\n");
5943 1.98 msaitoh return (ENOMEM);
5944 1.98 msaitoh }
5945 1.52 msaitoh
5946 1.98 msaitoh // TODO: use sbufs to make a string to print out
5947 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */
5948 1.333 msaitoh switch (sc->hw.mac.type) {
5949 1.98 msaitoh case ixgbe_mac_X550:
5950 1.98 msaitoh case ixgbe_mac_X550EM_x:
5951 1.99 msaitoh case ixgbe_mac_X550EM_a:
5952 1.98 msaitoh reta_size = 128;
5953 1.98 msaitoh break;
5954 1.98 msaitoh default:
5955 1.98 msaitoh reta_size = 32;
5956 1.98 msaitoh break;
5957 1.43 msaitoh }
5958 1.1 dyoung
5959 1.98 msaitoh /* Print out the redirection table */
5960 1.98 msaitoh sbuf_cat(buf, "\n");
5961 1.98 msaitoh for (int i = 0; i < reta_size; i++) {
5962 1.98 msaitoh if (i < 32) {
5963 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5964 1.98 msaitoh sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5965 1.98 msaitoh } else {
5966 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5967 1.98 msaitoh sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5968 1.98 msaitoh }
5969 1.28 msaitoh }
5970 1.1 dyoung
5971 1.98 msaitoh // TODO: print more config
5972 1.43 msaitoh
5973 1.98 msaitoh error = sbuf_finish(buf);
5974 1.98 msaitoh if (error)
5975 1.98 msaitoh device_printf(dev, "Error finishing sbuf: %d\n", error);
5976 1.1 dyoung
5977 1.98 msaitoh sbuf_delete(buf);
5978 1.99 msaitoh #endif
5979 1.98 msaitoh return (0);
5980 1.99 msaitoh } /* ixgbe_sysctl_print_rss_config */
5981 1.98 msaitoh #endif /* IXGBE_DEBUG */
5982 1.24 msaitoh
5983 1.99 msaitoh /************************************************************************
5984 1.99 msaitoh * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5985 1.99 msaitoh *
5986 1.99 msaitoh * For X552/X557-AT devices using an external PHY
5987 1.99 msaitoh ************************************************************************/
5988 1.44 msaitoh static int
5989 1.44 msaitoh ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5990 1.44 msaitoh {
5991 1.44 msaitoh struct sysctlnode node = *rnode;
5992 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
5993 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
5994 1.44 msaitoh int val;
5995 1.44 msaitoh u16 reg;
5996 1.44 msaitoh int error;
5997 1.44 msaitoh
5998 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
5999 1.169 msaitoh return (EPERM);
6000 1.169 msaitoh
6001 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6002 1.325 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6003 1.333 msaitoh device_printf(sc->dev,
6004 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6005 1.44 msaitoh return (ENODEV);
6006 1.44 msaitoh }
6007 1.44 msaitoh
6008 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
6009 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6010 1.333 msaitoh device_printf(sc->dev,
6011 1.44 msaitoh "Error reading from PHY's current temperature register\n");
6012 1.44 msaitoh return (EAGAIN);
6013 1.44 msaitoh }
6014 1.44 msaitoh
6015 1.44 msaitoh node.sysctl_data = &val;
6016 1.44 msaitoh
6017 1.44 msaitoh /* Shift temp for output */
6018 1.44 msaitoh val = reg >> 8;
6019 1.44 msaitoh
6020 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6021 1.44 msaitoh if ((error) || (newp == NULL))
6022 1.44 msaitoh return (error);
6023 1.44 msaitoh
6024 1.44 msaitoh return (0);
6025 1.99 msaitoh } /* ixgbe_sysctl_phy_temp */
6026 1.44 msaitoh
6027 1.99 msaitoh /************************************************************************
6028 1.99 msaitoh * ixgbe_sysctl_phy_overtemp_occurred
6029 1.99 msaitoh *
6030 1.99 msaitoh * Reports (directly from the PHY) whether the current PHY
6031 1.99 msaitoh * temperature is over the overtemp threshold.
6032 1.99 msaitoh ************************************************************************/
6033 1.44 msaitoh static int
6034 1.44 msaitoh ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
6035 1.44 msaitoh {
6036 1.44 msaitoh struct sysctlnode node = *rnode;
6037 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6038 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6039 1.44 msaitoh int val, error;
6040 1.44 msaitoh u16 reg;
6041 1.44 msaitoh
6042 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6043 1.169 msaitoh return (EPERM);
6044 1.169 msaitoh
6045 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) &&
6046 1.344 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) {
6047 1.333 msaitoh device_printf(sc->dev,
6048 1.44 msaitoh "Device has no supported external thermal sensor.\n");
6049 1.44 msaitoh return (ENODEV);
6050 1.44 msaitoh }
6051 1.44 msaitoh
6052 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
6053 1.99 msaitoh 	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
6054 1.333 msaitoh device_printf(sc->dev,
6055 1.44 msaitoh "Error reading from PHY's temperature status register\n");
6056 1.44 msaitoh return (EAGAIN);
6057 1.44 msaitoh }
6058 1.44 msaitoh
6059 1.44 msaitoh node.sysctl_data = &val;
6060 1.44 msaitoh
6061 1.44 msaitoh /* Get occurrence bit */
6062 1.44 msaitoh val = !!(reg & 0x4000);
6063 1.44 msaitoh
6064 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6065 1.44 msaitoh if ((error) || (newp == NULL))
6066 1.44 msaitoh return (error);
6067 1.44 msaitoh
6068 1.44 msaitoh return (0);
6069 1.99 msaitoh } /* ixgbe_sysctl_phy_overtemp_occurred */
6070 1.99 msaitoh
6071 1.99 msaitoh /************************************************************************
6072 1.99 msaitoh * ixgbe_sysctl_eee_state
6073 1.99 msaitoh *
6074 1.99 msaitoh * Sysctl to set EEE power saving feature
6075 1.99 msaitoh * Values:
6076 1.99 msaitoh * 0 - disable EEE
6077 1.99 msaitoh * 1 - enable EEE
6078 1.99 msaitoh * (none) - get current device EEE state
6079 1.99 msaitoh ************************************************************************/
6080 1.99 msaitoh static int
6081 1.99 msaitoh ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6082 1.99 msaitoh {
6083 1.99 msaitoh struct sysctlnode node = *rnode;
6084 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6085 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6086 1.333 msaitoh device_t dev = sc->dev;
6087 1.186 msaitoh int curr_eee, new_eee, error = 0;
6088 1.186 msaitoh s32 retval;
6089 1.99 msaitoh
6090 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6091 1.169 msaitoh return (EPERM);
6092 1.169 msaitoh
6093 1.333 msaitoh curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
6094 1.99 msaitoh node.sysctl_data = &new_eee;
6095 1.99 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6096 1.99 msaitoh if ((error) || (newp == NULL))
6097 1.99 msaitoh return (error);
6098 1.99 msaitoh
6099 1.99 msaitoh /* Nothing to do */
6100 1.99 msaitoh if (new_eee == curr_eee)
6101 1.99 msaitoh return (0);
6102 1.99 msaitoh
6103 1.99 msaitoh /* Not supported */
6104 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
6105 1.99 msaitoh return (EINVAL);
6106 1.99 msaitoh
6107 1.99 msaitoh /* Bounds checking */
6108 1.99 msaitoh if ((new_eee < 0) || (new_eee > 1))
6109 1.99 msaitoh return (EINVAL);
6110 1.99 msaitoh
6111 1.333 msaitoh retval = ixgbe_setup_eee(&sc->hw, new_eee);
6112 1.99 msaitoh if (retval) {
6113 1.99 msaitoh device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6114 1.99 msaitoh return (EINVAL);
6115 1.99 msaitoh }
6116 1.99 msaitoh
6117 1.99 msaitoh /* Restart auto-neg */
6118 1.302 riastrad if_init(ifp);
6119 1.99 msaitoh
6120 1.99 msaitoh device_printf(dev, "New EEE state: %d\n", new_eee);
6121 1.99 msaitoh
6122 1.99 msaitoh /* Cache new value */
6123 1.99 msaitoh if (new_eee)
6124 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6125 1.99 msaitoh else
6126 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_EEE;
6127 1.99 msaitoh
6128 1.99 msaitoh return (error);
6129 1.99 msaitoh } /* ixgbe_sysctl_eee_state */
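
/*
 * Minimal userland sketch of exercising this handler via sysctlbyname(3).
 * The node name "hw.ixg0.eee_state" is an assumption for illustration only;
 * check ixgbe_add_device_sysctls() for the name actually created for a
 * given interface.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int cur, on = 1;
 *		size_t len = sizeof(cur);
 *
 *		// Read the current EEE state (0 or 1).
 *		if (sysctlbyname("hw.ixg0.eee_state", &cur, &len,
 *		    NULL, 0) == -1)
 *			return 1;
 *		printf("EEE is %s\n", cur ? "enabled" : "disabled");
 *
 *		// Request EEE on; the driver re-inits the interface.
 *		return sysctlbyname("hw.ixg0.eee_state", NULL, NULL,
 *		    &on, sizeof(on)) == -1 ? 1 : 0;
 *	}
 */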
6130 1.99 msaitoh
6131 1.333 msaitoh #define PRINTQS(sc, regname) \
6132 1.158 msaitoh do { \
6133 1.333 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \
6134 1.158 msaitoh int _i; \
6135 1.158 msaitoh \
6136 1.333 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \
6137 1.333 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \
6138 1.158 msaitoh printf((_i == 0) ? "\t" : " "); \
6139 1.158 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \
6140 1.158 msaitoh IXGBE_##regname(_i))); \
6141 1.158 msaitoh } \
6142 1.158 msaitoh printf("\n"); \
6143 1.158 msaitoh } while (0)
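
/*
 * For reference, PRINTQS(sc, RDH) expands (modulo the do/while wrapper and
 * the leading-underscore locals) to roughly:
 *
 *	printf("%s: %s", device_xname(sc->dev), "RDH");
 *	for (int i = 0; i < sc->num_queues; i++) {
 *		printf((i == 0) ? "\t" : " ");
 *		printf("%08x", IXGBE_READ_REG(&sc->hw, IXGBE_RDH(i)));
 *	}
 *	printf("\n");
 *
 * i.e. one line per register, with the per-queue values in hex.
 */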
6144 1.158 msaitoh
6145 1.158 msaitoh /************************************************************************
6146 1.158 msaitoh * ixgbe_print_debug_info
6147 1.158 msaitoh *
6148 1.158 msaitoh  * Called only when the debug sysctl (ixgbe_sysctl_debug()) is set to 1.
6149 1.158 msaitoh * Provides a way to take a look at important statistics
6150 1.158 msaitoh * maintained by the driver and hardware.
6151 1.158 msaitoh ************************************************************************/
6152 1.158 msaitoh static void
6153 1.333 msaitoh ixgbe_print_debug_info(struct ixgbe_softc *sc)
6154 1.158 msaitoh {
6155 1.333 msaitoh device_t dev = sc->dev;
6156 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6157 1.158 msaitoh int table_size;
6158 1.158 msaitoh int i;
6159 1.158 msaitoh
6160 1.333 msaitoh switch (sc->hw.mac.type) {
6161 1.158 msaitoh case ixgbe_mac_X550:
6162 1.158 msaitoh case ixgbe_mac_X550EM_x:
6163 1.158 msaitoh case ixgbe_mac_X550EM_a:
6164 1.158 msaitoh table_size = 128;
6165 1.158 msaitoh break;
6166 1.158 msaitoh default:
6167 1.158 msaitoh table_size = 32;
6168 1.158 msaitoh break;
6169 1.158 msaitoh }
6170 1.185 msaitoh
6171 1.158 msaitoh device_printf(dev, "[E]RETA:\n");
6172 1.158 msaitoh for (i = 0; i < table_size; i++) {
6173 1.158 msaitoh if (i < 32)
6174 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6175 1.158 msaitoh IXGBE_RETA(i)));
6176 1.158 msaitoh else
6177 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6178 1.158 msaitoh IXGBE_ERETA(i - 32)));
6179 1.158 msaitoh }
6180 1.158 msaitoh
6181 1.158 msaitoh device_printf(dev, "queue:");
6182 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6183 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6184 1.158 msaitoh printf("%8d", i);
6185 1.158 msaitoh }
6186 1.158 msaitoh printf("\n");
6187 1.333 msaitoh PRINTQS(sc, RDBAL);
6188 1.333 msaitoh PRINTQS(sc, RDBAH);
6189 1.333 msaitoh PRINTQS(sc, RDLEN);
6190 1.333 msaitoh PRINTQS(sc, SRRCTL);
6191 1.333 msaitoh PRINTQS(sc, RDH);
6192 1.333 msaitoh PRINTQS(sc, RDT);
6193 1.333 msaitoh PRINTQS(sc, RXDCTL);
6194 1.158 msaitoh
6195 1.158 msaitoh device_printf(dev, "RQSMR:");
6196 1.333 msaitoh for (i = 0; i < sc->num_queues / 4; i++) {
6197 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6198 1.158 msaitoh printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6199 1.158 msaitoh }
6200 1.158 msaitoh printf("\n");
6201 1.158 msaitoh
6202 1.158 msaitoh device_printf(dev, "disabled_count:");
6203 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) {
6204 1.158 msaitoh printf((i == 0) ? "\t" : " ");
6205 1.333 msaitoh printf("%8d", sc->queues[i].disabled_count);
6206 1.158 msaitoh }
6207 1.158 msaitoh printf("\n");
6208 1.185 msaitoh
6209 1.158 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6210 1.158 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) {
6211 1.158 msaitoh device_printf(dev, "EIMS_EX(0):\t%08x\n",
6212 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6213 1.158 msaitoh device_printf(dev, "EIMS_EX(1):\t%08x\n",
6214 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6215 1.158 msaitoh }
6216 1.265 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6217 1.265 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6218 1.158 msaitoh } /* ixgbe_print_debug_info */
6219 1.158 msaitoh
6220 1.158 msaitoh /************************************************************************
6221 1.158 msaitoh * ixgbe_sysctl_debug
6222 1.158 msaitoh ************************************************************************/
6223 1.158 msaitoh static int
6224 1.158 msaitoh ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6225 1.158 msaitoh {
6226 1.158 msaitoh struct sysctlnode node = *rnode;
6227 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6228 1.186 msaitoh int error, result = 0;
6229 1.158 msaitoh
6230 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6231 1.169 msaitoh return (EPERM);
6232 1.169 msaitoh
6233 1.158 msaitoh node.sysctl_data = &result;
6234 1.158 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6235 1.158 msaitoh
6236 1.158 msaitoh if (error || newp == NULL)
6237 1.158 msaitoh return error;
6238 1.158 msaitoh
6239 1.158 msaitoh if (result == 1)
6240 1.333 msaitoh ixgbe_print_debug_info(sc);
6241 1.158 msaitoh
6242 1.158 msaitoh return 0;
6243 1.158 msaitoh } /* ixgbe_sysctl_debug */
6244 1.158 msaitoh
6245 1.99 msaitoh /************************************************************************
6246 1.286 msaitoh * ixgbe_sysctl_rx_copy_len
6247 1.286 msaitoh ************************************************************************/
6248 1.286 msaitoh static int
6249 1.286 msaitoh ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6250 1.286 msaitoh {
6251 1.286 msaitoh struct sysctlnode node = *rnode;
6252 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6253 1.286 msaitoh int error;
6254 1.333 msaitoh int result = sc->rx_copy_len;
6255 1.286 msaitoh
6256 1.286 msaitoh node.sysctl_data = &result;
6257 1.286 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6258 1.286 msaitoh
6259 1.286 msaitoh if (error || newp == NULL)
6260 1.286 msaitoh return error;
6261 1.286 msaitoh
6262 1.286 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6263 1.286 msaitoh return EINVAL;
6264 1.286 msaitoh
6265 1.333 msaitoh sc->rx_copy_len = result;
6266 1.286 msaitoh
6267 1.286 msaitoh return 0;
6268 1.286 msaitoh } /* ixgbe_sysctl_rx_copy_len */
6269 1.286 msaitoh
6270 1.286 msaitoh /************************************************************************
6271 1.313 msaitoh * ixgbe_sysctl_tx_process_limit
6272 1.313 msaitoh ************************************************************************/
6273 1.313 msaitoh static int
6274 1.313 msaitoh ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6275 1.313 msaitoh {
6276 1.313 msaitoh struct sysctlnode node = *rnode;
6277 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6278 1.313 msaitoh int error;
6279 1.333 msaitoh int result = sc->tx_process_limit;
6280 1.313 msaitoh
6281 1.313 msaitoh node.sysctl_data = &result;
6282 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6283 1.313 msaitoh
6284 1.313 msaitoh if (error || newp == NULL)
6285 1.313 msaitoh return error;
6286 1.313 msaitoh
6287 1.333 msaitoh if ((result <= 0) || (result > sc->num_tx_desc))
6288 1.313 msaitoh return EINVAL;
6289 1.313 msaitoh
6290 1.333 msaitoh sc->tx_process_limit = result;
6291 1.313 msaitoh
6292 1.313 msaitoh return 0;
6293 1.313 msaitoh } /* ixgbe_sysctl_tx_process_limit */
6294 1.313 msaitoh
6295 1.313 msaitoh /************************************************************************
6296 1.313 msaitoh * ixgbe_sysctl_rx_process_limit
6297 1.313 msaitoh ************************************************************************/
6298 1.313 msaitoh static int
6299 1.313 msaitoh ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6300 1.313 msaitoh {
6301 1.313 msaitoh struct sysctlnode node = *rnode;
6302 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6303 1.313 msaitoh int error;
6304 1.333 msaitoh int result = sc->rx_process_limit;
6305 1.313 msaitoh
6306 1.313 msaitoh node.sysctl_data = &result;
6307 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node));
6308 1.313 msaitoh
6309 1.313 msaitoh if (error || newp == NULL)
6310 1.313 msaitoh return error;
6311 1.313 msaitoh
6312 1.333 msaitoh if ((result <= 0) || (result > sc->num_rx_desc))
6313 1.313 msaitoh return EINVAL;
6314 1.313 msaitoh
6315 1.333 msaitoh sc->rx_process_limit = result;
6316 1.313 msaitoh
6317 1.313 msaitoh return 0;
6318 1.313 msaitoh } /* ixgbe_sysctl_rx_process_limit */
6319 1.313 msaitoh
6320 1.313 msaitoh /************************************************************************
6321 1.99 msaitoh * ixgbe_init_device_features
6322 1.99 msaitoh ************************************************************************/
6323 1.99 msaitoh static void
6324 1.333 msaitoh ixgbe_init_device_features(struct ixgbe_softc *sc)
6325 1.99 msaitoh {
6326 1.333 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP
6327 1.186 msaitoh | IXGBE_FEATURE_RSS
6328 1.186 msaitoh | IXGBE_FEATURE_MSI
6329 1.186 msaitoh | IXGBE_FEATURE_MSIX
6330 1.186 msaitoh | IXGBE_FEATURE_LEGACY_IRQ
6331 1.186 msaitoh | IXGBE_FEATURE_LEGACY_TX;
6332 1.99 msaitoh
6333 1.99 msaitoh /* Set capabilities first... */
6334 1.333 msaitoh switch (sc->hw.mac.type) {
6335 1.99 msaitoh case ixgbe_mac_82598EB:
6336 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
6337 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6338 1.99 msaitoh break;
6339 1.99 msaitoh case ixgbe_mac_X540:
6340 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6341 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6342 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6343 1.333 msaitoh (sc->hw.bus.func == 0))
6344 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6345 1.99 msaitoh break;
6346 1.99 msaitoh case ixgbe_mac_X550:
6347 1.169 msaitoh /*
6348 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6349 1.169 msaitoh * NVM Image version.
6350 1.169 msaitoh */
6351 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6352 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6353 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6354 1.99 msaitoh break;
6355 1.99 msaitoh case ixgbe_mac_X550EM_x:
6356 1.169 msaitoh /*
6357 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6358 1.169 msaitoh * NVM Image version.
6359 1.169 msaitoh */
6360 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6361 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6362 1.99 msaitoh break;
6363 1.99 msaitoh case ixgbe_mac_X550EM_a:
6364 1.169 msaitoh /*
6365 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6366 1.169 msaitoh * NVM Image version.
6367 1.169 msaitoh */
6368 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6369 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6370 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6371 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6372 1.333 msaitoh (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6373 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6374 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_EEE;
6375 1.99 msaitoh }
6376 1.99 msaitoh break;
6377 1.99 msaitoh case ixgbe_mac_82599EB:
6378 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6379 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR;
6380 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6381 1.333 msaitoh (sc->hw.bus.func == 0))
6382 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6383 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6384 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6385 1.99 msaitoh break;
6386 1.99 msaitoh default:
6387 1.99 msaitoh break;
6388 1.99 msaitoh }
6389 1.99 msaitoh
6390 1.99 msaitoh /* Enabled by default... */
6391 1.99 msaitoh /* Fan failure detection */
6392 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6393 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6394 1.99 msaitoh /* Netmap */
6395 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
6396 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP;
6397 1.99 msaitoh /* EEE */
6398 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE)
6399 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE;
6400 1.99 msaitoh /* Thermal Sensor */
6401 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6402 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6403 1.169 msaitoh /*
6404 1.169 msaitoh * Recovery mode:
6405 1.169 msaitoh * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6406 1.169 msaitoh * NVM Image version.
6407 1.169 msaitoh */
6408 1.99 msaitoh
6409 1.99 msaitoh /* Enabled via global sysctl... */
6410 1.99 msaitoh /* Flow Director */
6411 1.99 msaitoh if (ixgbe_enable_fdir) {
6412 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FDIR)
6413 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FDIR;
6414 1.99 msaitoh else
6415 1.333 msaitoh device_printf(sc->dev, "Device does not support "
6416 1.320 msaitoh "Flow Director. Leaving disabled.");
6417 1.99 msaitoh }
6418 1.99 msaitoh /* Legacy (single queue) transmit */
6419 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6420 1.99 msaitoh ixgbe_enable_legacy_tx)
6421 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6422 1.99 msaitoh /*
6423 1.99 msaitoh * Message Signal Interrupts - Extended (MSI-X)
6424 1.99 msaitoh * Normal MSI is only enabled if MSI-X calls fail.
6425 1.99 msaitoh */
6426 1.99 msaitoh if (!ixgbe_enable_msix)
6427 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
6428 1.99 msaitoh /* Receive-Side Scaling (RSS) */
6429 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6430 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS;
6431 1.99 msaitoh
6432 1.99 msaitoh /* Disable features with unmet dependencies... */
6433 1.99 msaitoh /* No MSI-X */
6434 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
6435 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
6436 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6437 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
6438 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
6439 1.99 msaitoh }
6440 1.99 msaitoh } /* ixgbe_init_device_features */
6441 1.44 msaitoh
6442 1.99 msaitoh /************************************************************************
6443 1.99 msaitoh * ixgbe_probe - Device identification routine
6444 1.98 msaitoh *
6445 1.99 msaitoh  * Determines if the driver should be loaded on
6446 1.99 msaitoh  * the adapter based on its PCI vendor/device ID.
6447 1.98 msaitoh  *
6448 1.99 msaitoh  * return 1 if the adapter is supported, 0 otherwise
6449 1.99 msaitoh ************************************************************************/
6450 1.98 msaitoh static int
6451 1.98 msaitoh ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6452 1.98 msaitoh {
6453 1.98 msaitoh const struct pci_attach_args *pa = aux;
6454 1.98 msaitoh
6455 1.98 msaitoh return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6456 1.98 msaitoh }
6457 1.98 msaitoh
6458 1.159 maxv static const ixgbe_vendor_info_t *
6459 1.98 msaitoh ixgbe_lookup(const struct pci_attach_args *pa)
6460 1.98 msaitoh {
6461 1.159 maxv const ixgbe_vendor_info_t *ent;
6462 1.98 msaitoh pcireg_t subid;
6463 1.98 msaitoh
6464 1.98 msaitoh INIT_DEBUGOUT("ixgbe_lookup: begin");
6465 1.98 msaitoh
6466 1.98 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6467 1.98 msaitoh return NULL;
6468 1.98 msaitoh
6469 1.98 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6470 1.98 msaitoh
6471 1.98 msaitoh for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6472 1.99 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6473 1.99 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6474 1.99 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6475 1.99 msaitoh (ent->subvendor_id == 0)) &&
6476 1.99 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6477 1.99 msaitoh (ent->subdevice_id == 0))) {
6478 1.98 msaitoh return ent;
6479 1.98 msaitoh }
6480 1.98 msaitoh }
6481 1.98 msaitoh return NULL;
6482 1.98 msaitoh }
6483 1.98 msaitoh
6484 1.98 msaitoh static int
6485 1.98 msaitoh ixgbe_ifflags_cb(struct ethercom *ec)
6486 1.98 msaitoh {
6487 1.98 msaitoh struct ifnet *ifp = &ec->ec_if;
6488 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6489 1.210 msaitoh u_short change;
6490 1.210 msaitoh int rv = 0;
6491 1.98 msaitoh
6492 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6493 1.98 msaitoh
6494 1.333 msaitoh change = ifp->if_flags ^ sc->if_flags;
6495 1.98 msaitoh if (change != 0)
6496 1.333 msaitoh sc->if_flags = ifp->if_flags;
6497 1.98 msaitoh
6498 1.192 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6499 1.192 msaitoh rv = ENETRESET;
6500 1.192 msaitoh goto out;
6501 1.192 msaitoh } else if ((change & IFF_PROMISC) != 0)
6502 1.333 msaitoh ixgbe_set_rxfilter(sc);
6503 1.98 msaitoh
6504 1.193 msaitoh /* Check for ec_capenable. */
6505 1.333 msaitoh change = ec->ec_capenable ^ sc->ec_capenable;
6506 1.333 msaitoh sc->ec_capenable = ec->ec_capenable;
6507 1.193 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6508 1.193 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) {
6509 1.193 msaitoh rv = ENETRESET;
6510 1.193 msaitoh goto out;
6511 1.193 msaitoh }
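	/*
	 * Example: toggling ETHERCAP_VLAN_HWTAGGING alone only re-programs
	 * the VLAN hardware support below, whereas toggling a bit outside
	 * the three listed above (e.g. ETHERCAP_JUMBO_MTU) takes the
	 * ENETRESET path.
	 */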
6512 1.193 msaitoh
6513 1.193 msaitoh /*
6514 1.193 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU.
6515 1.193 msaitoh 	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6516 1.193 msaitoh */
6517 1.193 msaitoh
6518 1.98 msaitoh /* Set up VLAN support and filter */
6519 1.193 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6520 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc);
6521 1.98 msaitoh
6522 1.192 msaitoh out:
6523 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6524 1.98 msaitoh
6525 1.192 msaitoh return rv;
6526 1.98 msaitoh }
6527 1.98 msaitoh
6528 1.99 msaitoh /************************************************************************
6529 1.99 msaitoh * ixgbe_ioctl - Ioctl entry point
6530 1.98 msaitoh *
6531 1.99 msaitoh * Called when the user wants to configure the interface.
6532 1.98 msaitoh *
6533 1.99 msaitoh * return 0 on success, positive on failure
6534 1.99 msaitoh ************************************************************************/
6535 1.98 msaitoh static int
6536 1.232 msaitoh ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6537 1.98 msaitoh {
6538 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc;
6539 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
6540 1.98 msaitoh struct ifcapreq *ifcr = data;
6541 1.98 msaitoh struct ifreq *ifr = data;
6542 1.186 msaitoh int error = 0;
6543 1.98 msaitoh int l4csum_en;
6544 1.185 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6545 1.185 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6546 1.98 msaitoh
6547 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc))
6548 1.169 msaitoh return (EPERM);
6549 1.169 msaitoh
6550 1.98 msaitoh switch (command) {
6551 1.98 msaitoh case SIOCSIFFLAGS:
6552 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6553 1.98 msaitoh break;
6554 1.98 msaitoh case SIOCADDMULTI:
6555 1.98 msaitoh case SIOCDELMULTI:
6556 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6557 1.98 msaitoh break;
6558 1.98 msaitoh case SIOCSIFMEDIA:
6559 1.98 msaitoh case SIOCGIFMEDIA:
6560 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6561 1.98 msaitoh break;
6562 1.98 msaitoh case SIOCSIFCAP:
6563 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6564 1.98 msaitoh break;
6565 1.98 msaitoh case SIOCSIFMTU:
6566 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6567 1.98 msaitoh break;
6568 1.98 msaitoh #ifdef __NetBSD__
6569 1.98 msaitoh case SIOCINITIFADDR:
6570 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6571 1.98 msaitoh break;
6572 1.98 msaitoh case SIOCGIFFLAGS:
6573 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6574 1.98 msaitoh break;
6575 1.98 msaitoh case SIOCGIFAFLAG_IN:
6576 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6577 1.98 msaitoh break;
6578 1.98 msaitoh case SIOCGIFADDR:
6579 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6580 1.98 msaitoh break;
6581 1.98 msaitoh case SIOCGIFMTU:
6582 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6583 1.98 msaitoh break;
6584 1.98 msaitoh case SIOCGIFCAP:
6585 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6586 1.98 msaitoh break;
6587 1.98 msaitoh case SIOCGETHERCAP:
6588 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6589 1.98 msaitoh break;
6590 1.98 msaitoh case SIOCGLIFADDR:
6591 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6592 1.98 msaitoh break;
6593 1.98 msaitoh case SIOCZIFDATA:
6594 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6595 1.98 msaitoh hw->mac.ops.clear_hw_cntrs(hw);
6596 1.333 msaitoh ixgbe_clear_evcnt(sc);
6597 1.98 msaitoh break;
6598 1.98 msaitoh case SIOCAIFADDR:
6599 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6600 1.98 msaitoh break;
6601 1.98 msaitoh #endif
6602 1.98 msaitoh default:
6603 1.98 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6604 1.98 msaitoh break;
6605 1.98 msaitoh }
6606 1.24 msaitoh
6607 1.98 msaitoh switch (command) {
6608 1.98 msaitoh case SIOCGI2C:
6609 1.98 msaitoh {
6610 1.98 msaitoh struct ixgbe_i2c_req i2c;
6611 1.24 msaitoh
6612 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6613 1.98 msaitoh error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6614 1.98 msaitoh if (error != 0)
6615 1.98 msaitoh break;
6616 1.98 msaitoh if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6617 1.98 msaitoh error = EINVAL;
6618 1.98 msaitoh break;
6619 1.98 msaitoh }
6620 1.98 msaitoh if (i2c.len > sizeof(i2c.data)) {
6621 1.98 msaitoh error = EINVAL;
6622 1.98 msaitoh break;
6623 1.98 msaitoh }
6624 1.24 msaitoh
6625 1.98 msaitoh hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6626 1.98 msaitoh i2c.dev_addr, i2c.data);
6627 1.98 msaitoh error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6628 1.98 msaitoh break;
6629 1.98 msaitoh }
6630 1.98 msaitoh case SIOCSIFCAP:
6631 1.98 msaitoh /* Layer-4 Rx checksum offload has to be turned on and
6632 1.98 msaitoh * off as a unit.
6633 1.98 msaitoh */
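		/*
		 * Example: a request enabling only IFCAP_CSUM_TCPv4_Rx is
		 * rejected with EINVAL; either none or all four of the
		 * TCPv4/UDPv4/TCPv6/UDPv6 Rx bits must be requested.
		 */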
6634 1.98 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum;
6635 1.98 msaitoh if (l4csum_en != l4csum && l4csum_en != 0)
6636 1.98 msaitoh return EINVAL;
6637 1.98 msaitoh /*FALLTHROUGH*/
6638 1.98 msaitoh case SIOCADDMULTI:
6639 1.98 msaitoh case SIOCDELMULTI:
6640 1.98 msaitoh case SIOCSIFFLAGS:
6641 1.98 msaitoh case SIOCSIFMTU:
6642 1.98 msaitoh default:
6643 1.98 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6644 1.98 msaitoh return error;
6645 1.98 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0)
6646 1.98 msaitoh ;
6647 1.98 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6648 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6649 1.135 msaitoh if ((ifp->if_flags & IFF_RUNNING) != 0)
6650 1.333 msaitoh ixgbe_init_locked(sc);
6651 1.333 msaitoh ixgbe_recalculate_max_frame(sc);
6652 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6653 1.98 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6654 1.98 msaitoh /*
6655 1.98 msaitoh * Multicast list has changed; set the hardware filter
6656 1.98 msaitoh * accordingly.
6657 1.98 msaitoh */
6658 1.333 msaitoh IXGBE_CORE_LOCK(sc);
6659 1.333 msaitoh ixgbe_disable_intr(sc);
6660 1.333 msaitoh ixgbe_set_rxfilter(sc);
6661 1.333 msaitoh ixgbe_enable_intr(sc);
6662 1.333 msaitoh IXGBE_CORE_UNLOCK(sc);
6663 1.98 msaitoh }
6664 1.98 msaitoh return 0;
6665 1.24 msaitoh }
6666 1.24 msaitoh
6667 1.98 msaitoh return error;
6668 1.99 msaitoh } /* ixgbe_ioctl */
6669 1.99 msaitoh
6670 1.99 msaitoh /************************************************************************
6671 1.99 msaitoh * ixgbe_check_fan_failure
6672 1.99 msaitoh ************************************************************************/
6673 1.274 msaitoh static int
6674 1.333 msaitoh ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
6675 1.99 msaitoh {
6676 1.99 msaitoh u32 mask;
6677 1.99 msaitoh
6678 1.333 msaitoh mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
6679 1.99 msaitoh IXGBE_ESDP_SDP1;
6680 1.26 msaitoh
6681 1.312 msaitoh if ((reg & mask) == 0)
6682 1.312 msaitoh return IXGBE_SUCCESS;
6683 1.312 msaitoh
6684 1.312 msaitoh /*
6685 1.312 msaitoh 	 * Use ratecheck() in case the interrupt occurs frequently.
6686 1.312 msaitoh 	 * When the EXPX9501AT's fan stopped, the interrupt occurred only
6687 1.312 msaitoh 	 * once, a red LED on the board turned on, and the link never came
6688 1.312 msaitoh 	 * up again until power off.
6689 1.312 msaitoh */
6690 1.333 msaitoh if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl))
6691 1.333 msaitoh device_printf(sc->dev,
6692 1.280 msaitoh "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6693 1.274 msaitoh
6694 1.312 msaitoh return IXGBE_ERR_FAN_FAILURE;
6695 1.99 msaitoh } /* ixgbe_check_fan_failure */
6696 1.99 msaitoh
6697 1.99 msaitoh /************************************************************************
6698 1.99 msaitoh * ixgbe_handle_que
6699 1.99 msaitoh ************************************************************************/
6700 1.98 msaitoh static void
6701 1.98 msaitoh ixgbe_handle_que(void *context)
6702 1.44 msaitoh {
6703 1.98 msaitoh struct ix_queue *que = context;
6704 1.333 msaitoh struct ixgbe_softc *sc = que->sc;
6705 1.186 msaitoh struct tx_ring *txr = que->txr;
6706 1.333 msaitoh struct ifnet *ifp = sc->ifp;
6707 1.121 msaitoh bool more = false;
6708 1.44 msaitoh
6709 1.305 msaitoh IXGBE_EVC_ADD(&que->handleq, 1);
6710 1.44 msaitoh
6711 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) {
6712 1.98 msaitoh IXGBE_TX_LOCK(txr);
6713 1.323 msaitoh more = ixgbe_txeof(txr);
6714 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
6715 1.99 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6716 1.99 msaitoh ixgbe_mq_start_locked(ifp, txr);
6717 1.98 msaitoh /* Only for queue 0 */
6718 1.99 msaitoh /* NetBSD still needs this for CBQ */
6719 1.333 msaitoh if ((&sc->queues[0] == que)
6720 1.99 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6721 1.99 msaitoh ixgbe_legacy_start_locked(ifp, txr);
6722 1.98 msaitoh IXGBE_TX_UNLOCK(txr);
6723 1.323 msaitoh more |= ixgbe_rxeof(que);
6724 1.44 msaitoh }
6725 1.44 msaitoh
6726 1.128 knakahar if (more) {
6727 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1);
6728 1.333 msaitoh ixgbe_sched_handle_que(sc, que);
6729 1.128 knakahar } else if (que->res != NULL) {
6730 1.265 msaitoh /* MSIX: Re-enable this interrupt */
6731 1.333 msaitoh ixgbe_enable_queue(sc, que->msix);
6732 1.265 msaitoh } else {
6733 1.265 msaitoh /* INTx or MSI */
6734 1.333 msaitoh ixgbe_enable_queue(sc, 0);
6735 1.265 msaitoh }
6736 1.99 msaitoh
6737 1.98 msaitoh return;
6738 1.99 msaitoh } /* ixgbe_handle_que */
6739 1.44 msaitoh
6740 1.99 msaitoh /************************************************************************
6741 1.128 knakahar * ixgbe_handle_que_work
6742 1.128 knakahar ************************************************************************/
6743 1.128 knakahar static void
6744 1.128 knakahar ixgbe_handle_que_work(struct work *wk, void *context)
6745 1.128 knakahar {
6746 1.128 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6747 1.128 knakahar
6748 1.128 knakahar /*
6749 1.128 knakahar * "enqueued flag" is not required here.
6750 1.128 knakahar * See ixgbe_msix_que().
6751 1.128 knakahar */
6752 1.128 knakahar ixgbe_handle_que(que);
6753 1.128 knakahar }
6754 1.128 knakahar
6755 1.128 knakahar /************************************************************************
6756 1.99 msaitoh * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6757 1.99 msaitoh ************************************************************************/
6758 1.48 msaitoh static int
6759 1.333 msaitoh ixgbe_allocate_legacy(struct ixgbe_softc *sc,
6760 1.98 msaitoh const struct pci_attach_args *pa)
6761 1.48 msaitoh {
6762 1.333 msaitoh device_t dev = sc->dev;
6763 1.333 msaitoh struct ix_queue *que = sc->queues;
6764 1.333 msaitoh struct tx_ring *txr = sc->tx_rings;
6765 1.98 msaitoh int counts[PCI_INTR_TYPE_SIZE];
6766 1.98 msaitoh pci_intr_type_t intr_type, max_type;
6767 1.186 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6768 1.206 knakahar char wqname[MAXCOMLEN];
6769 1.98 msaitoh const char *intrstr = NULL;
6770 1.206 knakahar int defertx_error = 0, error;
6771 1.185 msaitoh
6772 1.99 msaitoh /* We allocate a single interrupt resource */
6773 1.98 msaitoh max_type = PCI_INTR_TYPE_MSI;
6774 1.98 msaitoh counts[PCI_INTR_TYPE_MSIX] = 0;
6775 1.99 msaitoh counts[PCI_INTR_TYPE_MSI] =
6776 1.333 msaitoh (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6777 1.118 msaitoh 	/* Check feat_cap (not feat_en) so we can fall back to INTx */
6778 1.99 msaitoh counts[PCI_INTR_TYPE_INTX] =
6779 1.333 msaitoh (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6780 1.48 msaitoh
6781 1.98 msaitoh alloc_retry:
6782 1.333 msaitoh if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) {
6783 1.98 msaitoh aprint_error_dev(dev, "couldn't alloc interrupt\n");
6784 1.98 msaitoh return ENXIO;
6785 1.98 msaitoh }
6786 1.333 msaitoh sc->osdep.nintrs = 1;
6787 1.333 msaitoh intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0],
6788 1.98 msaitoh intrbuf, sizeof(intrbuf));
6789 1.333 msaitoh sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc,
6790 1.333 msaitoh sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6791 1.98 msaitoh device_xname(dev));
6792 1.333 msaitoh intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]);
6793 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6794 1.98 msaitoh 		aprint_error_dev(dev, "unable to establish %s\n",
6795 1.98 msaitoh (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6796 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6797 1.333 msaitoh sc->osdep.intrs = NULL;
6798 1.98 msaitoh switch (intr_type) {
6799 1.98 msaitoh case PCI_INTR_TYPE_MSI:
6800 1.98 msaitoh /* The next try is for INTx: Disable MSI */
6801 1.98 msaitoh max_type = PCI_INTR_TYPE_INTX;
6802 1.98 msaitoh counts[PCI_INTR_TYPE_INTX] = 1;
6803 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6804 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6805 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6806 1.118 msaitoh goto alloc_retry;
6807 1.118 msaitoh } else
6808 1.118 msaitoh break;
6809 1.98 msaitoh case PCI_INTR_TYPE_INTX:
6810 1.98 msaitoh default:
6811 1.98 msaitoh /* See below */
6812 1.98 msaitoh break;
6813 1.98 msaitoh }
6814 1.98 msaitoh }
6815 1.119 msaitoh if (intr_type == PCI_INTR_TYPE_INTX) {
6816 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI;
6817 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6818 1.119 msaitoh }
6819 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) {
6820 1.98 msaitoh aprint_error_dev(dev,
6821 1.98 msaitoh "couldn't establish interrupt%s%s\n",
6822 1.98 msaitoh intrstr ? " at " : "", intrstr ? intrstr : "");
6823 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6824 1.333 msaitoh sc->osdep.intrs = NULL;
6825 1.98 msaitoh return ENXIO;
6826 1.98 msaitoh }
6827 1.98 msaitoh aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6828 1.98 msaitoh /*
6829 1.98 msaitoh * Try allocating a fast interrupt and the associated deferred
6830 1.98 msaitoh * processing contexts.
6831 1.98 msaitoh */
6832 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6833 1.99 msaitoh txr->txr_si =
6834 1.229 msaitoh softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6835 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6836 1.206 knakahar
6837 1.280 msaitoh snprintf(wqname, sizeof(wqname), "%sdeferTx",
6838 1.280 msaitoh device_xname(dev));
6839 1.333 msaitoh defertx_error = workqueue_create(&sc->txr_wq, wqname,
6840 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI,
6841 1.206 knakahar IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6842 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6843 1.206 knakahar }
6844 1.229 msaitoh que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6845 1.98 msaitoh ixgbe_handle_que, que);
6846 1.206 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6847 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
6848 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
6849 1.206 knakahar IXGBE_WORKQUEUE_FLAGS);
6850 1.48 msaitoh
6851 1.333 msaitoh if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
6852 1.206 knakahar && ((txr->txr_si == NULL) || defertx_error != 0))
6853 1.206 knakahar || (que->que_si == NULL) || error != 0) {
6854 1.98 msaitoh aprint_error_dev(dev,
6855 1.185 msaitoh "could not establish software interrupts\n");
6856 1.99 msaitoh
6857 1.98 msaitoh return ENXIO;
6858 1.98 msaitoh }
6859 1.98 msaitoh /* For simplicity in the handlers */
6860 1.333 msaitoh sc->active_queues = IXGBE_EIMS_ENABLE_MASK;
6861 1.44 msaitoh
6862 1.44 msaitoh return (0);
6863 1.99 msaitoh } /* ixgbe_allocate_legacy */
6864 1.44 msaitoh
6865 1.99 msaitoh /************************************************************************
6866 1.99 msaitoh * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6867 1.99 msaitoh ************************************************************************/
6868 1.44 msaitoh static int
6869 1.333 msaitoh ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
6870 1.44 msaitoh {
6871 1.333 msaitoh device_t dev = sc->dev;
6872 1.341 msaitoh struct ix_queue *que = sc->queues;
6873 1.341 msaitoh struct tx_ring *txr = sc->tx_rings;
6874 1.98 msaitoh pci_chipset_tag_t pc;
6875 1.98 msaitoh char intrbuf[PCI_INTRSTR_LEN];
6876 1.98 msaitoh char intr_xname[32];
6877 1.128 knakahar char wqname[MAXCOMLEN];
6878 1.98 msaitoh const char *intrstr = NULL;
6879 1.186 msaitoh int error, vector = 0;
6880 1.98 msaitoh int cpu_id = 0;
6881 1.98 msaitoh kcpuset_t *affinity;
6882 1.99 msaitoh #ifdef RSS
6883 1.186 msaitoh unsigned int rss_buckets = 0;
6884 1.99 msaitoh kcpuset_t cpu_mask;
6885 1.98 msaitoh #endif
6886 1.98 msaitoh
6887 1.333 msaitoh pc = sc->osdep.pc;
6888 1.98 msaitoh #ifdef RSS
6889 1.98 msaitoh /*
6890 1.98 msaitoh * If we're doing RSS, the number of queues needs to
6891 1.98 msaitoh * match the number of RSS buckets that are configured.
6892 1.98 msaitoh *
6893 1.98 msaitoh 	 * + If there are more queues than RSS buckets, we'll end
6894 1.98 msaitoh * up with queues that get no traffic.
6895 1.98 msaitoh *
6896 1.98 msaitoh 	 * + If there are more RSS buckets than queues, we'll end
6897 1.98 msaitoh * up having multiple RSS buckets map to the same queue,
6898 1.98 msaitoh * so there'll be some contention.
6899 1.98 msaitoh */
6900 1.99 msaitoh rss_buckets = rss_getnumbuckets();
6901 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
6902 1.333 msaitoh (sc->num_queues != rss_buckets)) {
6903 1.98 msaitoh device_printf(dev,
6904 1.98 msaitoh "%s: number of queues (%d) != number of RSS buckets (%d)"
6905 1.98 msaitoh "; performance will be impacted.\n",
6906 1.333 msaitoh __func__, sc->num_queues, rss_buckets);
6907 1.98 msaitoh }
6908 1.98 msaitoh #endif
6909 1.98 msaitoh
6910 1.333 msaitoh sc->osdep.nintrs = sc->num_queues + 1;
6911 1.333 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
6912 1.333 msaitoh sc->osdep.nintrs) != 0) {
6913 1.98 msaitoh aprint_error_dev(dev,
6914 1.98 msaitoh "failed to allocate MSI-X interrupt\n");
6915 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
6916 1.98 msaitoh return (ENXIO);
6917 1.98 msaitoh }
6918 1.98 msaitoh
6919 1.98 msaitoh kcpuset_create(&affinity, false);
6920 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6921 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6922 1.98 msaitoh device_xname(dev), i);
6923 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
6924 1.98 msaitoh sizeof(intrbuf));
6925 1.98 msaitoh #ifdef IXGBE_MPSAFE
6926 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
6927 1.98 msaitoh true);
6928 1.98 msaitoh #endif
6929 1.98 msaitoh /* Set the handler function */
6930 1.333 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6931 1.333 msaitoh sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6932 1.98 msaitoh intr_xname);
6933 1.98 msaitoh if (que->res == NULL) {
6934 1.98 msaitoh aprint_error_dev(dev,
6935 1.98 msaitoh "Failed to register QUE handler\n");
6936 1.119 msaitoh error = ENXIO;
6937 1.119 msaitoh goto err_out;
6938 1.98 msaitoh }
6939 1.98 msaitoh que->msix = vector;
6940 1.333 msaitoh sc->active_queues |= 1ULL << que->msix;
6941 1.99 msaitoh
6942 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) {
6943 1.98 msaitoh #ifdef RSS
6944 1.99 msaitoh /*
6945 1.99 msaitoh * The queue ID is used as the RSS layer bucket ID.
6946 1.99 msaitoh * We look up the queue ID -> RSS CPU ID and select
6947 1.99 msaitoh * that.
6948 1.99 msaitoh */
6949 1.99 msaitoh cpu_id = rss_getcpu(i % rss_getnumbuckets());
6950 1.99 msaitoh CPU_SETOF(cpu_id, &cpu_mask);
6951 1.98 msaitoh #endif
6952 1.99 msaitoh } else {
6953 1.99 msaitoh /*
6954 1.99 msaitoh * Bind the MSI-X vector, and thus the
6955 1.99 msaitoh * rings to the corresponding CPU.
6956 1.99 msaitoh *
6957 1.99 msaitoh * This just happens to match the default RSS
6958 1.99 msaitoh * round-robin bucket -> queue -> CPU allocation.
6959 1.99 msaitoh */
6960 1.333 msaitoh if (sc->num_queues > 1)
6961 1.99 msaitoh cpu_id = i;
6962 1.99 msaitoh }
6963 1.98 msaitoh /* Round-robin affinity */
6964 1.98 msaitoh kcpuset_zero(affinity);
6965 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
6966 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity,
6967 1.98 msaitoh NULL);
6968 1.98 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6969 1.98 msaitoh intrstr);
6970 1.98 msaitoh if (error == 0) {
6971 1.98 msaitoh #if 1 /* def IXGBE_DEBUG */
6972 1.98 msaitoh #ifdef RSS
6973 1.322 skrll aprint_normal(", bound RSS bucket %d to CPU %d", i,
6974 1.99 msaitoh cpu_id % ncpu);
6975 1.98 msaitoh #else
6976 1.99 msaitoh aprint_normal(", bound queue %d to cpu %d", i,
6977 1.99 msaitoh cpu_id % ncpu);
6978 1.98 msaitoh #endif
6979 1.98 msaitoh #endif /* IXGBE_DEBUG */
6980 1.98 msaitoh }
6981 1.98 msaitoh aprint_normal("\n");
6982 1.99 msaitoh
6983 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6984 1.99 msaitoh txr->txr_si = softint_establish(
6985 1.229 msaitoh SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6986 1.99 msaitoh ixgbe_deferred_mq_start, txr);
6987 1.119 msaitoh if (txr->txr_si == NULL) {
6988 1.119 msaitoh aprint_error_dev(dev,
6989 1.119 msaitoh "couldn't establish software interrupt\n");
6990 1.119 msaitoh error = ENXIO;
6991 1.119 msaitoh goto err_out;
6992 1.119 msaitoh }
6993 1.119 msaitoh }
6994 1.98 msaitoh que->que_si
6995 1.229 msaitoh = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6996 1.98 msaitoh ixgbe_handle_que, que);
6997 1.98 msaitoh if (que->que_si == NULL) {
6998 1.98 msaitoh aprint_error_dev(dev,
6999 1.185 msaitoh "couldn't establish software interrupt\n");
7000 1.119 msaitoh error = ENXIO;
7001 1.119 msaitoh goto err_out;
7002 1.98 msaitoh }
7003 1.98 msaitoh }
7004 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
7005 1.333 msaitoh error = workqueue_create(&sc->txr_wq, wqname,
7006 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7007 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7008 1.128 knakahar if (error) {
7009 1.280 msaitoh aprint_error_dev(dev,
7010 1.280 msaitoh "couldn't create workqueue for deferred Tx\n");
7011 1.128 knakahar goto err_out;
7012 1.128 knakahar }
7013 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
7014 1.128 knakahar
7015 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
7016 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname,
7017 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7018 1.128 knakahar IXGBE_WORKQUEUE_FLAGS);
7019 1.128 knakahar if (error) {
7020 1.128 knakahar aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
7021 1.128 knakahar goto err_out;
7022 1.128 knakahar }
7023 1.44 msaitoh
7024 1.98 msaitoh /* and Link */
7025 1.98 msaitoh cpu_id++;
7026 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
7027 1.333 msaitoh sc->vector = vector;
7028 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
7029 1.98 msaitoh sizeof(intrbuf));
7030 1.98 msaitoh #ifdef IXGBE_MPSAFE
7031 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
7032 1.98 msaitoh true);
7033 1.98 msaitoh #endif
7034 1.98 msaitoh /* Set the link handler function */
7035 1.333 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
7036 1.333 msaitoh sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc,
7037 1.98 msaitoh intr_xname);
7038 1.333 msaitoh if (sc->osdep.ihs[vector] == NULL) {
7039 1.98 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n");
7040 1.119 msaitoh error = ENXIO;
7041 1.119 msaitoh goto err_out;
7042 1.98 msaitoh }
7043 1.98 msaitoh /* Round-robin affinity */
7044 1.98 msaitoh kcpuset_zero(affinity);
7045 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu);
7046 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
7047 1.119 msaitoh NULL);
7048 1.44 msaitoh
7049 1.98 msaitoh aprint_normal_dev(dev,
7050 1.98 msaitoh "for link, interrupting at %s", intrstr);
7051 1.98 msaitoh if (error == 0)
7052 1.98 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
7053 1.44 msaitoh else
7054 1.98 msaitoh aprint_normal("\n");
7055 1.44 msaitoh
7056 1.98 msaitoh kcpuset_destroy(affinity);
7057 1.119 msaitoh aprint_normal_dev(dev,
7058 1.119 msaitoh "Using MSI-X interrupts with %d vectors\n", vector + 1);
7059 1.99 msaitoh
7060 1.44 msaitoh return (0);
7061 1.119 msaitoh
7062 1.119 msaitoh err_out:
7063 1.119 msaitoh kcpuset_destroy(affinity);
7064 1.333 msaitoh ixgbe_free_deferred_handlers(sc);
7065 1.333 msaitoh ixgbe_free_pciintr_resources(sc);
7066 1.119 msaitoh return (error);
7067 1.99 msaitoh } /* ixgbe_allocate_msix */
7068 1.44 msaitoh
7069 1.99 msaitoh /************************************************************************
7070 1.99 msaitoh * ixgbe_configure_interrupts
7071 1.99 msaitoh *
7072 1.99 msaitoh * Setup MSI-X, MSI, or legacy interrupts (in that order).
7073 1.99 msaitoh * This will also depend on user settings.
7074 1.99 msaitoh ************************************************************************/
7075 1.44 msaitoh static int
7076 1.333 msaitoh ixgbe_configure_interrupts(struct ixgbe_softc *sc)
7077 1.44 msaitoh {
7078 1.333 msaitoh device_t dev = sc->dev;
7079 1.333 msaitoh struct ixgbe_mac_info *mac = &sc->hw.mac;
7080 1.98 msaitoh int want, queues, msgs;
7081 1.44 msaitoh
7082 1.99 msaitoh /* Default to 1 queue if MSI-X setup fails */
7083 1.333 msaitoh sc->num_queues = 1;
7084 1.99 msaitoh
7085 1.98 msaitoh /* Override by tuneable */
7086 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX))
7087 1.98 msaitoh goto msi;
7088 1.44 msaitoh
7089 1.118 msaitoh /*
7090 1.118 msaitoh 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
7091 1.118 msaitoh 	 * to save an interrupt slot.
7092 1.118 msaitoh */
7093 1.118 msaitoh if (ncpu == 1)
7094 1.118 msaitoh goto msi;
7095 1.185 msaitoh
7096 1.99 msaitoh /* First try MSI-X */
7097 1.333 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
7098 1.98 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR);
7099 1.98 msaitoh if (msgs < 2)
7100 1.98 msaitoh goto msi;
7101 1.44 msaitoh
7102 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7103 1.44 msaitoh
7104 1.98 msaitoh /* Figure out a reasonable auto config value */
7105 1.98 msaitoh queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
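	/*
	 * Example: with ncpu = 8 and msgs >= 9 at this point, queues = 8,
	 * and the request built below is queues + 1 = 9 vectors (one per
	 * RX/TX queue pair plus one for the link interrupt).
	 */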
7106 1.44 msaitoh
7107 1.98 msaitoh #ifdef RSS
7108 1.98 msaitoh /* If we're doing RSS, clamp at the number of RSS buckets */
7109 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS)
7110 1.165 riastrad queues = uimin(queues, rss_getnumbuckets());
7111 1.98 msaitoh #endif
7112 1.99 msaitoh if (ixgbe_num_queues > queues) {
7113 1.333 msaitoh aprint_error_dev(sc->dev,
7114 1.319 msaitoh "ixgbe_num_queues (%d) is too large, "
7115 1.319 msaitoh 		    "using the reduced amount (%d).\n", ixgbe_num_queues, queues);
7116 1.99 msaitoh ixgbe_num_queues = queues;
7117 1.99 msaitoh }
7118 1.44 msaitoh
7119 1.98 msaitoh if (ixgbe_num_queues != 0)
7120 1.98 msaitoh queues = ixgbe_num_queues;
7121 1.98 msaitoh else
7122 1.165 riastrad queues = uimin(queues,
7123 1.165 riastrad uimin(mac->max_tx_queues, mac->max_rx_queues));
7124 1.44 msaitoh
7125 1.98 msaitoh /*
7126 1.99 msaitoh * Want one vector (RX/TX pair) per queue
7127 1.99 msaitoh * plus an additional for Link.
7128 1.99 msaitoh 	 * plus an additional one for Link.
7129 1.98 msaitoh want = queues + 1;
7130 1.98 msaitoh if (msgs >= want)
7131 1.98 msaitoh msgs = want;
7132 1.44 msaitoh else {
7133 1.186 msaitoh aprint_error_dev(dev, "MSI-X Configuration Problem, "
7134 1.319 msaitoh "%d vectors but %d queues wanted!\n", msgs, want);
7135 1.98 msaitoh goto msi;
7136 1.44 msaitoh }
7137 1.333 msaitoh sc->num_queues = queues;
7138 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX;
7139 1.99 msaitoh return (0);
7140 1.44 msaitoh
7141 1.98 msaitoh /*
7142 1.99 msaitoh * MSI-X allocation failed or provided us with
7143 1.99 msaitoh 	 * fewer vectors than needed. Free MSI-X resources
7144 1.99 msaitoh * and we'll try enabling MSI.
7145 1.99 msaitoh */
7146 1.98 msaitoh msi:
7147 1.99 msaitoh /* Without MSI-X, some features are no longer supported */
7148 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS;
7149 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS;
7150 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7151 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
7152 1.99 msaitoh
7153 1.333 msaitoh msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag);
7154 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX;
7155 1.99 msaitoh if (msgs > 1)
7156 1.99 msaitoh msgs = 1;
7157 1.99 msaitoh if (msgs != 0) {
7158 1.99 msaitoh msgs = 1;
7159 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI;
7160 1.99 msaitoh return (0);
7161 1.99 msaitoh }
7162 1.99 msaitoh
7163 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7164 1.99 msaitoh aprint_error_dev(dev,
7165 1.99 msaitoh "Device does not support legacy interrupts.\n");
7166 1.99 msaitoh return 1;
7167 1.99 msaitoh }
7168 1.99 msaitoh
7169 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7170 1.99 msaitoh
7171 1.99 msaitoh return (0);
7172 1.99 msaitoh } /* ixgbe_configure_interrupts */
7173 1.44 msaitoh
7174 1.48 msaitoh
7175 1.99 msaitoh /************************************************************************
7176 1.99 msaitoh * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7177 1.99 msaitoh *
7178 1.99 msaitoh * Done outside of interrupt context since the driver might sleep
7179 1.99 msaitoh ************************************************************************/
7180 1.26 msaitoh static void
7181 1.98 msaitoh ixgbe_handle_link(void *context)
7182 1.26 msaitoh {
7183 1.333 msaitoh struct ixgbe_softc *sc = context;
7184 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw;
7185 1.26 msaitoh
7186 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx));
7187 1.257 msaitoh
7188 1.333 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1);
7189 1.333 msaitoh ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0);
7190 1.333 msaitoh ixgbe_update_link_status(sc);
7191 1.26 msaitoh
7192 1.98 msaitoh /* Re-enable link interrupts */
7193 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7194 1.99 msaitoh } /* ixgbe_handle_link */
7195 1.45 msaitoh
7196 1.161 kamil #if 0
7197 1.99 msaitoh /************************************************************************
7198 1.99 msaitoh * ixgbe_rearm_queues
7199 1.99 msaitoh ************************************************************************/
7200 1.160 msaitoh static __inline void
7201 1.333 msaitoh ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues)
7202 1.63 msaitoh {
7203 1.63 msaitoh u32 mask;
7204 1.63 msaitoh
7205 1.333 msaitoh switch (sc->hw.mac.type) {
7206 1.63 msaitoh case ixgbe_mac_82598EB:
7207 1.63 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7208 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
7209 1.63 msaitoh break;
7210 1.63 msaitoh case ixgbe_mac_82599EB:
7211 1.63 msaitoh case ixgbe_mac_X540:
7212 1.63 msaitoh case ixgbe_mac_X550:
7213 1.63 msaitoh case ixgbe_mac_X550EM_x:
7214 1.99 msaitoh case ixgbe_mac_X550EM_a:
7215 1.63 msaitoh mask = (queues & 0xFFFFFFFF);
7216 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
7217 1.63 msaitoh mask = (queues >> 32);
7218 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
7219 1.63 msaitoh break;
7220 1.63 msaitoh default:
7221 1.63 msaitoh break;
7222 1.63 msaitoh }
7223 1.99 msaitoh } /* ixgbe_rearm_queues */
7224 1.161 kamil #endif
7225