ixgbe.c revision 1.274 1 /* $NetBSD: ixgbe.c,v 1.274 2020/12/26 06:10:17 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_phy.h"
74 #include "ixgbe_sriov.h"
75 #include "vlan.h"
76
77 #include <sys/cprng.h>
78 #include <dev/mii/mii.h>
79 #include <dev/mii/miivar.h>
80
81 /************************************************************************
82 * Driver version
83 ************************************************************************/
84 static const char ixgbe_driver_version[] = "4.0.1-k";
85 /* XXX NetBSD: + 3.3.10 */
86
87 /************************************************************************
88 * PCI Device ID Table
89 *
90 * Used by probe to select devices to load on
91 * Last field stores an index into ixgbe_strings
92 * Last entry must be all 0s
93 *
94 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
95 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family (PCIe 10GbE, first generation) */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last ("String Index") field of the matching
 *   ixgbe_vendor_info_array entry; printed at attach time.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
158
159 /************************************************************************
160 * Function prototypes
161 ************************************************************************/
162 static int ixgbe_probe(device_t, cfdata_t, void *);
163 static void ixgbe_quirks(struct adapter *);
164 static void ixgbe_attach(device_t, device_t, void *);
165 static int ixgbe_detach(device_t, int);
166 #if 0
167 static int ixgbe_shutdown(device_t);
168 #endif
169 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
170 static bool ixgbe_resume(device_t, const pmf_qual_t *);
171 static int ixgbe_ifflags_cb(struct ethercom *);
172 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
173 static int ixgbe_init(struct ifnet *);
174 static void ixgbe_init_locked(struct adapter *);
175 static void ixgbe_ifstop(struct ifnet *, int);
176 static void ixgbe_stop_locked(void *);
177 static void ixgbe_init_device_features(struct adapter *);
178 static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
179 static void ixgbe_add_media_types(struct adapter *);
180 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
181 static int ixgbe_media_change(struct ifnet *);
182 static int ixgbe_allocate_pci_resources(struct adapter *,
183 const struct pci_attach_args *);
184 static void ixgbe_free_deferred_handlers(struct adapter *);
185 static void ixgbe_get_slot_info(struct adapter *);
186 static int ixgbe_allocate_msix(struct adapter *,
187 const struct pci_attach_args *);
188 static int ixgbe_allocate_legacy(struct adapter *,
189 const struct pci_attach_args *);
190 static int ixgbe_configure_interrupts(struct adapter *);
191 static void ixgbe_free_pciintr_resources(struct adapter *);
192 static void ixgbe_free_pci_resources(struct adapter *);
193 static void ixgbe_local_timer(void *);
194 static void ixgbe_handle_timer(struct work *, void *);
195 static void ixgbe_recovery_mode_timer(void *);
196 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
197 static int ixgbe_setup_interface(device_t, struct adapter *);
198 static void ixgbe_config_gpie(struct adapter *);
199 static void ixgbe_config_dmac(struct adapter *);
200 static void ixgbe_config_delay_values(struct adapter *);
201 static void ixgbe_schedule_admin_tasklet(struct adapter *);
202 static void ixgbe_config_link(struct adapter *);
203 static void ixgbe_check_wol_support(struct adapter *);
204 static int ixgbe_setup_low_power_mode(struct adapter *);
205 #if 0
206 static void ixgbe_rearm_queues(struct adapter *, u64);
207 #endif
208
209 static void ixgbe_initialize_transmit_units(struct adapter *);
210 static void ixgbe_initialize_receive_units(struct adapter *);
211 static void ixgbe_enable_rx_drop(struct adapter *);
212 static void ixgbe_disable_rx_drop(struct adapter *);
213 static void ixgbe_initialize_rss_mapping(struct adapter *);
214
215 static void ixgbe_enable_intr(struct adapter *);
216 static void ixgbe_disable_intr(struct adapter *);
217 static void ixgbe_update_stats_counters(struct adapter *);
218 static void ixgbe_set_rxfilter(struct adapter *);
219 static void ixgbe_update_link_status(struct adapter *);
220 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
221 static void ixgbe_configure_ivars(struct adapter *);
222 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
223 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
224
225 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
226 static void ixgbe_setup_vlan_hw_support(struct adapter *);
227 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
228 static int ixgbe_register_vlan(struct adapter *, u16);
229 static int ixgbe_unregister_vlan(struct adapter *, u16);
230
231 static void ixgbe_add_device_sysctls(struct adapter *);
232 static void ixgbe_add_hw_stats(struct adapter *);
233 static void ixgbe_clear_evcnt(struct adapter *);
234 static int ixgbe_set_flowcntl(struct adapter *, int);
235 static int ixgbe_set_advertise(struct adapter *, int);
236 static int ixgbe_get_advertise(struct adapter *);
237
238 /* Sysctl handlers */
239 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
240 const char *, int *, int);
241 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
247 #ifdef IXGBE_DEBUG
248 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
250 #endif
251 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
252 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
253 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
254 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
255 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
256 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
257 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
258 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
259 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
260
261 /* Legacy (single vector) interrupt handler */
262 static int ixgbe_legacy_irq(void *);
263
264 /* The MSI/MSI-X Interrupt handlers */
265 static int ixgbe_msix_que(void *);
266 static int ixgbe_msix_admin(void *);
267
268 /* Event handlers running on workqueue */
269 static void ixgbe_handle_que(void *);
270 static void ixgbe_handle_link(void *);
271 static void ixgbe_handle_msf(void *);
272 static void ixgbe_handle_mod(void *, bool);
273 static void ixgbe_handle_phy(void *);
274
275 /* Deferred workqueue handlers */
276 static void ixgbe_handle_admin(struct work *, void *);
277 static void ixgbe_handle_que_work(struct work *, void *);
278
279 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
280
281 /************************************************************************
282 * NetBSD Device Interface Entry Points
283 ************************************************************************/
284 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
285 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
286 DVF_DETACH_SHUTDOWN);
287
288 #if 0
289 devclass_t ix_devclass;
290 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
291
292 MODULE_DEPEND(ix, pci, 1, 1, 1);
293 MODULE_DEPEND(ix, ether, 1, 1, 1);
294 #ifdef DEV_NETMAP
295 MODULE_DEPEND(ix, netmap, 1, 1, 1);
296 #endif
297 #endif
298
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * SYSCTL_INT is a FreeBSD construct; it is defined away here so the
 * FreeBSD tunable declarations below expand to nothing on NetBSD.
 * The descriptive strings are kept for reference only.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Upper bound on the per-vector interrupt rate (interrupts/second). */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* TUNABLE_INT is likewise a FreeBSD-ism; stubbed to a no-op on NetBSD. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * Flags for callouts, softints and workqueues: when the kernel is built
 * with NET_MPSAFE these run without the kernel lock (MPSAFE variants),
 * otherwise the plain (big-lock) variants are used.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI	PRI_SOFTNET
423
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the 40-byte RSS
 *   hash key (RSSRK) and the hash-type selection register (MRQC) so
 *   that received flows are distributed across the RX queues.
 *   Caller is expected to have RX disabled while this runs.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];	/* 10 * 32 bits == 40-byte key */
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a different queue-index encoding in RETA. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family extends the table to 512 entries (ERETA). */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table; j cycles over the active queues. */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate 4 one-byte entries, then flush one 32-bit
		 * register per 4 table entries.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 go to RETA, 128+ to ERETA (X550). */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the selected hash types into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in SR-IOV queue-assignment mode bits, if any. */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
540
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs, per RX queue: descriptor ring base/length (RDBAL/RDBAH/
 *   RDLEN), buffer sizing (SRRCTL), statistics mapping (RQSMR) and the
 *   head/tail pointers.  Also configures broadcast acceptance (FCTRL),
 *   jumbo frames / CRC stripping (HLREG0), packet-split types
 *   (PSRTYPE), RSS and RX checksum offload (RXCSUM).
 ************************************************************************/
/* Round-up amount to convert a buffer size into SRRCTL BSIZEPKT units. */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in SRRCTL units (1KB granularity, rounded up). */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware queue index; i is the driver's index. */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header-split types; PSRTYPE(0) covers pool 0. */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
670
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs, per TX queue: descriptor ring base/length (TDBAL/TDBAH/
 *   TDLEN), statistics mapping (TQSMR/TQSM), head/tail pointers, and
 *   disables descriptor write-back relaxed ordering.  On non-82598
 *   parts it then enables the TX DMA engine and programs MTQC with the
 *   arbiter temporarily disabled.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware queue index; i is the driver's index. */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		/* Clear relaxed-ordering for descriptor write-back. */
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the TX DMA engine. */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
764
765 static void
766 ixgbe_quirks(struct adapter *adapter)
767 {
768 device_t dev = adapter->dev;
769 struct ixgbe_hw *hw = &adapter->hw;
770 const char *vendor, *product;
771
772 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
773 /*
774 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
775 * MA10-ST0.
776 */
777 vendor = pmf_get_platform("system-vendor");
778 product = pmf_get_platform("system-product");
779
780 if ((vendor == NULL) || (product == NULL))
781 return;
782
783 if ((strcmp(vendor, "GIGABYTE") == 0) &&
784 (strcmp(product, "MA10-ST0") == 0)) {
785 aprint_verbose_dev(dev,
786 "Enable SFP+ MOD_ABS inverse quirk\n");
787 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
788 }
789 }
790 }
791
792 /************************************************************************
793 * ixgbe_attach - Device initialization routine
794 *
795 * Called when the driver is being loaded.
796 * Identifies the type of hardware, allocates all resources
797 * and initializes the hardware.
798 *
799 * return 0 on success, positive on failure
800 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = -1;
	u32 ctrl_ext;
	u16 high, low, nvmreg;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool unsupported_sfp = false;
	const char *str;
	char wqname[MAXCOMLEN];
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform provides one. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;
	adapter->osdep.detaching = false;

	/* The probe routine already matched this device, so this must hit. */
	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		/*
		 * NOTE(review): err_out reads IXGBE_CTRL_EXT (BAR0 not
		 * mapped yet at this point) and destroys admin_mtx (not
		 * initialized until much later) -- confirm this early
		 * failure path is actually safe.
		 */
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* Set quirk flags (requires hw->device_id set above) */
	ixgbe_quirks(adapter);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Pretty-print the MAC type identified by the shared code. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring sizes come from
	 * tunables and must be within bounds and DBA_ALIGN-aligned,
	 * otherwise fall back to the defaults.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * Reset the hardware.  Missing or unsupported SFP modules are
	 * tolerated here (unsupported ones are remembered so the optics
	 * are not powered up later); any other error is fatal.
	 */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version (register layout differs per MAC family) */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;	/* 0xffff == unprogrammed/unreadable */
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	/* Remembered for the recovery-mode check below. */
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(adapter);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/*
	 * Recovery mode: only offered on X550 family parts whose NVM
	 * image version (read above) is >= 2.00.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	/* Not using MSI-X (either by config or via the fallback above). */
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&adapter->admin_wq, wqname,
	    ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	/* EEPROM-version problems are reported but not fatal. */
	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		/* Enable the optics for 82599 SFP+ fiber */
		ixgbe_enable_tx_laser(hw);

		/*
		 * XXX Currently, ixgbe_set_phy_power() supports only copper
		 * PHY, so it's not required to test with !unsupported_sfp.
		 */
		ixgbe_set_phy_power(hw, TRUE);
	}

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop_locked;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Log the (verbose) feature capability/enable bitmaps. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);
		snprintf(wqname, sizeof(wqname), "%s-recovery",
		    device_xname(dev));
		error = workqueue_create(&adapter->recovery_mode_timer_wq,
		    wqname, ixgbe_handle_recovery_mode_timer, adapter,
		    IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
		if (error) {
			aprint_error_dev(dev, "could not create "
			    "recovery_mode_timer workqueue (%d)\n", error);
			goto err_out;
		}

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	/* Tell the hardware the driver is no longer loaded. */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_deferred_handlers(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	/*
	 * NOTE(review): admin_mtx is only initialized late in attach;
	 * early goto err_out paths reach this mutex_destroy() with an
	 * uninitialized mutex -- verify.
	 */
	mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
1292
1293 /************************************************************************
1294 * ixgbe_check_wol_support
1295 *
1296 * Checks whether the adapter's ports are capable of
1297 * Wake On LAN by reading the adapter's NVM.
1298 *
1299 * Sets each port's hw->wol_enabled value depending
1300 * on the value read here.
1301 ************************************************************************/
1302 static void
1303 ixgbe_check_wol_support(struct adapter *adapter)
1304 {
1305 struct ixgbe_hw *hw = &adapter->hw;
1306 u16 dev_caps = 0;
1307
1308 /* Find out WoL support for port */
1309 adapter->wol_support = hw->wol_enabled = 0;
1310 ixgbe_get_device_caps(hw, &dev_caps);
1311 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1312 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1313 hw->bus.func == 0))
1314 adapter->wol_support = hw->wol_enabled = 1;
1315
1316 /* Save initial wake up filter configuration */
1317 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1318
1319 return;
1320 } /* ixgbe_check_wol_support */
1321
1322 /************************************************************************
1323 * ixgbe_setup_interface
1324 *
1325 * Setup networking device structure and register an interface.
1326 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; no separate allocation. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit only when LEGACY_TX is not enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Registration order matters: if_initialize() must precede
	 * ether_ifattach(), and if_register() comes last after all
	 * capabilities are set.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* All interface capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status, &adapter->core_mtx);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1432
1433 /************************************************************************
1434 * ixgbe_add_media_types
1435 ************************************************************************/
1436 static void
1437 ixgbe_add_media_types(struct adapter *adapter)
1438 {
1439 struct ixgbe_hw *hw = &adapter->hw;
1440 u64 layer;
1441
1442 layer = adapter->phy_layer;
1443
1444 #define ADD(mm, dd) \
1445 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1446
1447 ADD(IFM_NONE, 0);
1448
1449 /* Media types with matching NetBSD media defines */
1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1451 ADD(IFM_10G_T | IFM_FDX, 0);
1452 }
1453 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1454 ADD(IFM_1000_T | IFM_FDX, 0);
1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1457 ADD(IFM_100_TX | IFM_FDX, 0);
1458 }
1459 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1460 ADD(IFM_10_T | IFM_FDX, 0);
1461 }
1462
1463 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1464 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1465 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1466 }
1467
1468 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1469 ADD(IFM_10G_LR | IFM_FDX, 0);
1470 if (hw->phy.multispeed_fiber) {
1471 ADD(IFM_1000_LX | IFM_FDX, 0);
1472 }
1473 }
1474 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1475 ADD(IFM_10G_SR | IFM_FDX, 0);
1476 if (hw->phy.multispeed_fiber) {
1477 ADD(IFM_1000_SX | IFM_FDX, 0);
1478 }
1479 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1480 ADD(IFM_1000_SX | IFM_FDX, 0);
1481 }
1482 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1483 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1484 }
1485
1486 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1487 ADD(IFM_10G_KR | IFM_FDX, 0);
1488 }
1489 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1490 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1491 }
1492 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1493 ADD(IFM_1000_KX | IFM_FDX, 0);
1494 }
1495 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1496 ADD(IFM_2500_KX | IFM_FDX, 0);
1497 }
1498 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1499 ADD(IFM_2500_T | IFM_FDX, 0);
1500 }
1501 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1502 ADD(IFM_5000_T | IFM_FDX, 0);
1503 }
1504 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1505 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1506 /* XXX no ifmedia_set? */
1507
1508 ADD(IFM_AUTO, 0);
1509
1510 #undef ADD
1511 } /* ixgbe_add_media_types */
1512
1513 /************************************************************************
1514 * ixgbe_is_sfp
1515 ************************************************************************/
1516 static inline bool
1517 ixgbe_is_sfp(struct ixgbe_hw *hw)
1518 {
1519 switch (hw->mac.type) {
1520 case ixgbe_mac_82598EB:
1521 if (hw->phy.type == ixgbe_phy_nl)
1522 return (TRUE);
1523 return (FALSE);
1524 case ixgbe_mac_82599EB:
1525 case ixgbe_mac_X550EM_x:
1526 case ixgbe_mac_X550EM_a:
1527 switch (hw->mac.ops.get_media_type(hw)) {
1528 case ixgbe_media_type_fiber:
1529 case ixgbe_media_type_fiber_qsfp:
1530 return (TRUE);
1531 default:
1532 return (FALSE);
1533 }
1534 default:
1535 return (FALSE);
1536 }
1537 } /* ixgbe_is_sfp */
1538
1539 static void
1540 ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1541 {
1542
1543 KASSERT(mutex_owned(&adapter->admin_mtx));
1544
1545 if (__predict_true(adapter->osdep.detaching == false)) {
1546 if (adapter->admin_pending == 0)
1547 workqueue_enqueue(adapter->admin_wq,
1548 &adapter->admin_wc, NULL);
1549 adapter->admin_pending = 1;
1550 }
1551 }
1552
1553 /************************************************************************
1554 * ixgbe_config_link
1555 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	u32 task_requests = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP(+) ports: defer module identification (and, for
		 * multispeed fiber, speed selection) to the admin tasklet.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
		}
		task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;

		mutex_enter(&adapter->admin_mtx);
		adapter->task_requests |= task_requests;
		ixgbe_schedule_admin_tasklet(adapter);
		mutex_exit(&adapter->admin_mtx);
	} else {
		/* Non-SFP: set up the link synchronously. */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1603
1604 /************************************************************************
1605 * ixgbe_update_stats_counters - Update board statistics counters.
1606 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;
	unsigned int queue_counters;
	int i;

	/* Hardware counters are clear-on-read; accumulate into evcnts. */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist (per traffic class) */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* RX flow-control counters moved registers on 82599+. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx is never accumulated in this function
	 * (it stays 0), so this subtraction is currently a no-op -- the
	 * accumulation may have been lost in a past merge; verify.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	/* Pause frames sent; used to correct the TX good counters below. */
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit octet counters are split into low/high halves. */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX good counters exclude the pause frames counted above. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/*
	 * Fill out the OS statistics structure. Only RX errors are required
	 * here because all TX counters are incremented in the TX path and
	 * normal RX counters are prepared in ether_input().
	 */
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
	if_statadd_ref(nsr, if_ierrors, crcerrs + rlec);
	IF_STAT_PUTREF(ifp);
} /* ixgbe_update_stats_counters */
1766
1767 /************************************************************************
1768 * ixgbe_add_hw_stats
1769 *
1770 * Add sysctl variables, one per statistic, to the system.
1771 ************************************************************************/
1772 static void
1773 ixgbe_add_hw_stats(struct adapter *adapter)
1774 {
1775 device_t dev = adapter->dev;
1776 const struct sysctlnode *rnode, *cnode;
1777 struct sysctllog **log = &adapter->sysctllog;
1778 struct tx_ring *txr = adapter->tx_rings;
1779 struct rx_ring *rxr = adapter->rx_rings;
1780 struct ixgbe_hw *hw = &adapter->hw;
1781 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1782 const char *xname = device_xname(dev);
1783 int i;
1784
1785 /* Driver Statistics */
1786 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1787 NULL, xname, "Driver tx dma soft fail EFBIG");
1788 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1789 NULL, xname, "m_defrag() failed");
1790 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1791 NULL, xname, "Driver tx dma hard fail EFBIG");
1792 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1793 NULL, xname, "Driver tx dma hard fail EINVAL");
1794 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1795 NULL, xname, "Driver tx dma hard fail other");
1796 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1797 NULL, xname, "Driver tx dma soft fail EAGAIN");
1798 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1799 NULL, xname, "Driver tx dma soft fail ENOMEM");
1800 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1801 NULL, xname, "Watchdog timeouts");
1802 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1803 NULL, xname, "TSO errors");
1804 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1805 NULL, xname, "Admin MSI-X IRQ Handled");
1806 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1807 NULL, xname, "Link event");
1808 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1809 NULL, xname, "SFP+ module event");
1810 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1811 NULL, xname, "Multispeed event");
1812 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1813 NULL, xname, "External PHY event");
1814
1815 /* Max number of traffic class is 8 */
1816 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1817 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1818 snprintf(adapter->tcs[i].evnamebuf,
1819 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1820 xname, i);
1821 if (i < __arraycount(stats->mpc)) {
1822 evcnt_attach_dynamic(&stats->mpc[i],
1823 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1824 "RX Missed Packet Count");
1825 if (hw->mac.type == ixgbe_mac_82598EB)
1826 evcnt_attach_dynamic(&stats->rnbc[i],
1827 EVCNT_TYPE_MISC, NULL,
1828 adapter->tcs[i].evnamebuf,
1829 "Receive No Buffers");
1830 }
1831 if (i < __arraycount(stats->pxontxc)) {
1832 evcnt_attach_dynamic(&stats->pxontxc[i],
1833 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1834 "pxontxc");
1835 evcnt_attach_dynamic(&stats->pxonrxc[i],
1836 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1837 "pxonrxc");
1838 evcnt_attach_dynamic(&stats->pxofftxc[i],
1839 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1840 "pxofftxc");
1841 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1842 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1843 "pxoffrxc");
1844 if (hw->mac.type >= ixgbe_mac_82599EB)
1845 evcnt_attach_dynamic(&stats->pxon2offc[i],
1846 EVCNT_TYPE_MISC, NULL,
1847 adapter->tcs[i].evnamebuf,
1848 "pxon2offc");
1849 }
1850 }
1851
1852 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1853 #ifdef LRO
1854 struct lro_ctrl *lro = &rxr->lro;
1855 #endif /* LRO */
1856
1857 snprintf(adapter->queues[i].evnamebuf,
1858 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1859 xname, i);
1860 snprintf(adapter->queues[i].namebuf,
1861 sizeof(adapter->queues[i].namebuf), "q%d", i);
1862
1863 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1864 aprint_error_dev(dev, "could not create sysctl root\n");
1865 break;
1866 }
1867
1868 if (sysctl_createv(log, 0, &rnode, &rnode,
1869 0, CTLTYPE_NODE,
1870 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1871 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1872 break;
1873
1874 if (sysctl_createv(log, 0, &rnode, &cnode,
1875 CTLFLAG_READWRITE, CTLTYPE_INT,
1876 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1877 ixgbe_sysctl_interrupt_rate_handler, 0,
1878 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1879 break;
1880
1881 if (sysctl_createv(log, 0, &rnode, &cnode,
1882 CTLFLAG_READONLY, CTLTYPE_INT,
1883 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1884 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1885 0, CTL_CREATE, CTL_EOL) != 0)
1886 break;
1887
1888 if (sysctl_createv(log, 0, &rnode, &cnode,
1889 CTLFLAG_READONLY, CTLTYPE_INT,
1890 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1891 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1892 0, CTL_CREATE, CTL_EOL) != 0)
1893 break;
1894
1895 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1896 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1897 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1899 "Handled queue in softint");
1900 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1901 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1902 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1903 NULL, adapter->queues[i].evnamebuf, "TSO");
1904 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1905 NULL, adapter->queues[i].evnamebuf,
1906 "Queue No Descriptor Available");
1907 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1908 NULL, adapter->queues[i].evnamebuf,
1909 "Queue Packets Transmitted");
1910 #ifndef IXGBE_LEGACY_TX
1911 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1912 NULL, adapter->queues[i].evnamebuf,
1913 "Packets dropped in pcq");
1914 #endif
1915
1916 if (sysctl_createv(log, 0, &rnode, &cnode,
1917 CTLFLAG_READONLY,
1918 CTLTYPE_INT,
1919 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1920 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1921 CTL_CREATE, CTL_EOL) != 0)
1922 break;
1923
1924 if (sysctl_createv(log, 0, &rnode, &cnode,
1925 CTLFLAG_READONLY,
1926 CTLTYPE_INT,
1927 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1928 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1929 CTL_CREATE, CTL_EOL) != 0)
1930 break;
1931
1932 if (sysctl_createv(log, 0, &rnode, &cnode,
1933 CTLFLAG_READONLY,
1934 CTLTYPE_INT,
1935 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1936 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1937 CTL_CREATE, CTL_EOL) != 0)
1938 break;
1939
1940 if (i < __arraycount(stats->qprc)) {
1941 evcnt_attach_dynamic(&stats->qprc[i],
1942 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1943 "qprc");
1944 evcnt_attach_dynamic(&stats->qptc[i],
1945 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1946 "qptc");
1947 evcnt_attach_dynamic(&stats->qbrc[i],
1948 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1949 "qbrc");
1950 evcnt_attach_dynamic(&stats->qbtc[i],
1951 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1952 "qbtc");
1953 if (hw->mac.type >= ixgbe_mac_82599EB)
1954 evcnt_attach_dynamic(&stats->qprdc[i],
1955 EVCNT_TYPE_MISC, NULL,
1956 adapter->queues[i].evnamebuf, "qprdc");
1957 }
1958
1959 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1960 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1961 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1962 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1963 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1964 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1965 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1966 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1967 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1968 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1969 #ifdef LRO
1970 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1971 CTLFLAG_RD, &lro->lro_queued, 0,
1972 "LRO Queued");
1973 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1974 CTLFLAG_RD, &lro->lro_flushed, 0,
1975 "LRO Flushed");
1976 #endif /* LRO */
1977 }
1978
1979 /* MAC stats get their own sub node */
1980
1981 snprintf(stats->namebuf,
1982 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1983
1984 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "rx csum offload - IP");
1986 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "rx csum offload - L4");
1988 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "rx csum offload - IP bad");
1990 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "rx csum offload - L4 bad");
1992 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Interrupt conditions zero");
1994 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Legacy interrupts");
1996
1997 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "CRC Errors");
1999 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "Illegal Byte Errors");
2001 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Byte Errors");
2003 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "MAC Short Packets Discarded");
2005 if (hw->mac.type >= ixgbe_mac_X550)
2006 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Bad SFD");
2008 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Total Packets Missed");
2010 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "MAC Local Faults");
2012 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "MAC Remote Faults");
2014 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Receive Length Errors");
2016 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Link XON Transmitted");
2018 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Link XON Received");
2020 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "Link XOFF Transmitted");
2022 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Link XOFF Received");
2024
2025 /* Packet Reception Stats */
2026 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Total Octets Received");
2028 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Good Octets Received");
2030 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Total Packets Received");
2032 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Good Packets Received");
2034 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "Multicast Packets Received");
2036 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "Broadcast Packets Received");
2038 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "64 byte frames received ");
2040 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "65-127 byte frames received");
2042 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "128-255 byte frames received");
2044 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "256-511 byte frames received");
2046 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2047 stats->namebuf, "512-1023 byte frames received");
2048 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2049 stats->namebuf, "1023-1522 byte frames received");
2050 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2051 stats->namebuf, "Receive Undersized");
2052 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2053 stats->namebuf, "Fragmented Packets Received ");
2054 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2055 stats->namebuf, "Oversized Packets Received");
2056 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2057 stats->namebuf, "Received Jabber");
2058 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2059 stats->namebuf, "Management Packets Received");
2060 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2061 stats->namebuf, "Management Packets Dropped");
2062 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2063 stats->namebuf, "Checksum Errors");
2064
2065 /* Packet Transmission Stats */
2066 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2067 stats->namebuf, "Good Octets Transmitted");
2068 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2069 stats->namebuf, "Total Packets Transmitted");
2070 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2071 stats->namebuf, "Good Packets Transmitted");
2072 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2073 stats->namebuf, "Broadcast Packets Transmitted");
2074 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2075 stats->namebuf, "Multicast Packets Transmitted");
2076 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2077 stats->namebuf, "Management Packets Transmitted");
2078 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2079 stats->namebuf, "64 byte frames transmitted ");
2080 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2081 stats->namebuf, "65-127 byte frames transmitted");
2082 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2083 stats->namebuf, "128-255 byte frames transmitted");
2084 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2085 stats->namebuf, "256-511 byte frames transmitted");
2086 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2087 stats->namebuf, "512-1023 byte frames transmitted");
2088 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2089 stats->namebuf, "1024-1522 byte frames transmitted");
2090 } /* ixgbe_add_hw_stats */
2091
2092 static void
2093 ixgbe_clear_evcnt(struct adapter *adapter)
2094 {
2095 struct tx_ring *txr = adapter->tx_rings;
2096 struct rx_ring *rxr = adapter->rx_rings;
2097 struct ixgbe_hw *hw = &adapter->hw;
2098 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2099 int i;
2100
2101 adapter->efbig_tx_dma_setup.ev_count = 0;
2102 adapter->mbuf_defrag_failed.ev_count = 0;
2103 adapter->efbig2_tx_dma_setup.ev_count = 0;
2104 adapter->einval_tx_dma_setup.ev_count = 0;
2105 adapter->other_tx_dma_setup.ev_count = 0;
2106 adapter->eagain_tx_dma_setup.ev_count = 0;
2107 adapter->enomem_tx_dma_setup.ev_count = 0;
2108 adapter->tso_err.ev_count = 0;
2109 adapter->watchdog_events.ev_count = 0;
2110 adapter->admin_irqev.ev_count = 0;
2111 adapter->link_workev.ev_count = 0;
2112 adapter->mod_workev.ev_count = 0;
2113 adapter->msf_workev.ev_count = 0;
2114 adapter->phy_workev.ev_count = 0;
2115
2116 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2117 if (i < __arraycount(stats->mpc)) {
2118 stats->mpc[i].ev_count = 0;
2119 if (hw->mac.type == ixgbe_mac_82598EB)
2120 stats->rnbc[i].ev_count = 0;
2121 }
2122 if (i < __arraycount(stats->pxontxc)) {
2123 stats->pxontxc[i].ev_count = 0;
2124 stats->pxonrxc[i].ev_count = 0;
2125 stats->pxofftxc[i].ev_count = 0;
2126 stats->pxoffrxc[i].ev_count = 0;
2127 if (hw->mac.type >= ixgbe_mac_82599EB)
2128 stats->pxon2offc[i].ev_count = 0;
2129 }
2130 }
2131
2132 txr = adapter->tx_rings;
2133 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2134 adapter->queues[i].irqs.ev_count = 0;
2135 adapter->queues[i].handleq.ev_count = 0;
2136 adapter->queues[i].req.ev_count = 0;
2137 txr->no_desc_avail.ev_count = 0;
2138 txr->total_packets.ev_count = 0;
2139 txr->tso_tx.ev_count = 0;
2140 #ifndef IXGBE_LEGACY_TX
2141 txr->pcq_drops.ev_count = 0;
2142 #endif
2143 txr->q_efbig_tx_dma_setup = 0;
2144 txr->q_mbuf_defrag_failed = 0;
2145 txr->q_efbig2_tx_dma_setup = 0;
2146 txr->q_einval_tx_dma_setup = 0;
2147 txr->q_other_tx_dma_setup = 0;
2148 txr->q_eagain_tx_dma_setup = 0;
2149 txr->q_enomem_tx_dma_setup = 0;
2150 txr->q_tso_err = 0;
2151
2152 if (i < __arraycount(stats->qprc)) {
2153 stats->qprc[i].ev_count = 0;
2154 stats->qptc[i].ev_count = 0;
2155 stats->qbrc[i].ev_count = 0;
2156 stats->qbtc[i].ev_count = 0;
2157 if (hw->mac.type >= ixgbe_mac_82599EB)
2158 stats->qprdc[i].ev_count = 0;
2159 }
2160
2161 rxr->rx_packets.ev_count = 0;
2162 rxr->rx_bytes.ev_count = 0;
2163 rxr->rx_copies.ev_count = 0;
2164 rxr->no_jmbuf.ev_count = 0;
2165 rxr->rx_discarded.ev_count = 0;
2166 }
2167 stats->ipcs.ev_count = 0;
2168 stats->l4cs.ev_count = 0;
2169 stats->ipcs_bad.ev_count = 0;
2170 stats->l4cs_bad.ev_count = 0;
2171 stats->intzero.ev_count = 0;
2172 stats->legint.ev_count = 0;
2173 stats->crcerrs.ev_count = 0;
2174 stats->illerrc.ev_count = 0;
2175 stats->errbc.ev_count = 0;
2176 stats->mspdc.ev_count = 0;
2177 if (hw->mac.type >= ixgbe_mac_X550)
2178 stats->mbsdc.ev_count = 0;
2179 stats->mpctotal.ev_count = 0;
2180 stats->mlfc.ev_count = 0;
2181 stats->mrfc.ev_count = 0;
2182 stats->rlec.ev_count = 0;
2183 stats->lxontxc.ev_count = 0;
2184 stats->lxonrxc.ev_count = 0;
2185 stats->lxofftxc.ev_count = 0;
2186 stats->lxoffrxc.ev_count = 0;
2187
2188 /* Packet Reception Stats */
2189 stats->tor.ev_count = 0;
2190 stats->gorc.ev_count = 0;
2191 stats->tpr.ev_count = 0;
2192 stats->gprc.ev_count = 0;
2193 stats->mprc.ev_count = 0;
2194 stats->bprc.ev_count = 0;
2195 stats->prc64.ev_count = 0;
2196 stats->prc127.ev_count = 0;
2197 stats->prc255.ev_count = 0;
2198 stats->prc511.ev_count = 0;
2199 stats->prc1023.ev_count = 0;
2200 stats->prc1522.ev_count = 0;
2201 stats->ruc.ev_count = 0;
2202 stats->rfc.ev_count = 0;
2203 stats->roc.ev_count = 0;
2204 stats->rjc.ev_count = 0;
2205 stats->mngprc.ev_count = 0;
2206 stats->mngpdc.ev_count = 0;
2207 stats->xec.ev_count = 0;
2208
2209 /* Packet Transmission Stats */
2210 stats->gotc.ev_count = 0;
2211 stats->tpt.ev_count = 0;
2212 stats->gptc.ev_count = 0;
2213 stats->bptc.ev_count = 0;
2214 stats->mptc.ev_count = 0;
2215 stats->mngptc.ev_count = 0;
2216 stats->ptc64.ev_count = 0;
2217 stats->ptc127.ev_count = 0;
2218 stats->ptc255.ev_count = 0;
2219 stats->ptc511.ev_count = 0;
2220 stats->ptc1023.ev_count = 0;
2221 stats->ptc1522.ev_count = 0;
2222 }
2223
2224 /************************************************************************
2225 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2226 *
2227 * Retrieves the TDH value from the hardware
2228 ************************************************************************/
2229 static int
2230 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2231 {
2232 struct sysctlnode node = *rnode;
2233 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2234 struct adapter *adapter;
2235 uint32_t val;
2236
2237 if (!txr)
2238 return (0);
2239
2240 adapter = txr->adapter;
2241 if (ixgbe_fw_recovery_mode_swflag(adapter))
2242 return (EPERM);
2243
2244 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2245 node.sysctl_data = &val;
2246 return sysctl_lookup(SYSCTLFN_CALL(&node));
2247 } /* ixgbe_sysctl_tdh_handler */
2248
2249 /************************************************************************
2250 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2251 *
2252 * Retrieves the TDT value from the hardware
2253 ************************************************************************/
2254 static int
2255 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2256 {
2257 struct sysctlnode node = *rnode;
2258 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2259 struct adapter *adapter;
2260 uint32_t val;
2261
2262 if (!txr)
2263 return (0);
2264
2265 adapter = txr->adapter;
2266 if (ixgbe_fw_recovery_mode_swflag(adapter))
2267 return (EPERM);
2268
2269 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2270 node.sysctl_data = &val;
2271 return sysctl_lookup(SYSCTLFN_CALL(&node));
2272 } /* ixgbe_sysctl_tdt_handler */
2273
2274 /************************************************************************
2275 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2276 * handler function
2277 *
2278 * Retrieves the next_to_check value
2279 ************************************************************************/
2280 static int
2281 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2282 {
2283 struct sysctlnode node = *rnode;
2284 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2285 struct adapter *adapter;
2286 uint32_t val;
2287
2288 if (!rxr)
2289 return (0);
2290
2291 adapter = rxr->adapter;
2292 if (ixgbe_fw_recovery_mode_swflag(adapter))
2293 return (EPERM);
2294
2295 val = rxr->next_to_check;
2296 node.sysctl_data = &val;
2297 return sysctl_lookup(SYSCTLFN_CALL(&node));
2298 } /* ixgbe_sysctl_next_to_check_handler */
2299
2300 /************************************************************************
2301 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2302 *
2303 * Retrieves the RDH value from the hardware
2304 ************************************************************************/
2305 static int
2306 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2307 {
2308 struct sysctlnode node = *rnode;
2309 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2310 struct adapter *adapter;
2311 uint32_t val;
2312
2313 if (!rxr)
2314 return (0);
2315
2316 adapter = rxr->adapter;
2317 if (ixgbe_fw_recovery_mode_swflag(adapter))
2318 return (EPERM);
2319
2320 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2321 node.sysctl_data = &val;
2322 return sysctl_lookup(SYSCTLFN_CALL(&node));
2323 } /* ixgbe_sysctl_rdh_handler */
2324
2325 /************************************************************************
2326 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2327 *
2328 * Retrieves the RDT value from the hardware
2329 ************************************************************************/
2330 static int
2331 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2332 {
2333 struct sysctlnode node = *rnode;
2334 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2335 struct adapter *adapter;
2336 uint32_t val;
2337
2338 if (!rxr)
2339 return (0);
2340
2341 adapter = rxr->adapter;
2342 if (ixgbe_fw_recovery_mode_swflag(adapter))
2343 return (EPERM);
2344
2345 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2346 node.sysctl_data = &val;
2347 return sysctl_lookup(SYSCTLFN_CALL(&node));
2348 } /* ixgbe_sysctl_rdt_handler */
2349
2350 static int
2351 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2352 {
2353 struct ifnet *ifp = &ec->ec_if;
2354 struct adapter *adapter = ifp->if_softc;
2355 int rv;
2356
2357 if (set)
2358 rv = ixgbe_register_vlan(adapter, vid);
2359 else
2360 rv = ixgbe_unregister_vlan(adapter, vid);
2361
2362 if (rv != 0)
2363 return rv;
2364
2365 /*
2366 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2367 * or 0 to 1.
2368 */
2369 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2370 ixgbe_setup_vlan_hw_tagging(adapter);
2371
2372 return rv;
2373 }
2374
2375 /************************************************************************
2376 * ixgbe_register_vlan
2377 *
2378 * Run via vlan config EVENT, it enables us to use the
2379 * HW Filter table since we can get the vlan id. This
2380 * just creates the entry in the soft version of the
2381 * VFTA, init will repopulate the real table.
2382 ************************************************************************/
2383 static int
2384 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2385 {
2386 u16 index, bit;
2387 int error;
2388
2389 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2390 return EINVAL;
2391
2392 IXGBE_CORE_LOCK(adapter);
2393 index = (vtag >> 5) & 0x7F;
2394 bit = vtag & 0x1F;
2395 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2396 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2397 true);
2398 IXGBE_CORE_UNLOCK(adapter);
2399 if (error != 0)
2400 error = EACCES;
2401
2402 return error;
2403 } /* ixgbe_register_vlan */
2404
2405 /************************************************************************
2406 * ixgbe_unregister_vlan
2407 *
2408 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2409 ************************************************************************/
2410 static int
2411 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2412 {
2413 u16 index, bit;
2414 int error;
2415
2416 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2417 return EINVAL;
2418
2419 IXGBE_CORE_LOCK(adapter);
2420 index = (vtag >> 5) & 0x7F;
2421 bit = vtag & 0x1F;
2422 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2423 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2424 true);
2425 IXGBE_CORE_UNLOCK(adapter);
2426 if (error != 0)
2427 error = EACCES;
2428
2429 return error;
2430 } /* ixgbe_unregister_vlan */
2431
2432 static void
2433 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2434 {
2435 struct ethercom *ec = &adapter->osdep.ec;
2436 struct ixgbe_hw *hw = &adapter->hw;
2437 struct rx_ring *rxr;
2438 u32 ctrl;
2439 int i;
2440 bool hwtagging;
2441
2442 /* Enable HW tagging only if any vlan is attached */
2443 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2444 && VLAN_ATTACHED(ec);
2445
2446 /* Setup the queues for vlans */
2447 for (i = 0; i < adapter->num_queues; i++) {
2448 rxr = &adapter->rx_rings[i];
2449 /*
2450 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2451 */
2452 if (hw->mac.type != ixgbe_mac_82598EB) {
2453 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2454 if (hwtagging)
2455 ctrl |= IXGBE_RXDCTL_VME;
2456 else
2457 ctrl &= ~IXGBE_RXDCTL_VME;
2458 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2459 }
2460 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2461 }
2462
2463 /* VLAN hw tagging for 82598 */
2464 if (hw->mac.type == ixgbe_mac_82598EB) {
2465 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2466 if (hwtagging)
2467 ctrl |= IXGBE_VLNCTRL_VME;
2468 else
2469 ctrl &= ~IXGBE_VLNCTRL_VME;
2470 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2471 }
2472 } /* ixgbe_setup_vlan_hw_tagging */
2473
2474 static void
2475 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2476 {
2477 struct ethercom *ec = &adapter->osdep.ec;
2478 struct ixgbe_hw *hw = &adapter->hw;
2479 int i;
2480 u32 ctrl;
2481 struct vlanid_list *vlanidp;
2482
2483 /*
2484 * This function is called from both if_init and ifflags_cb()
2485 * on NetBSD.
2486 */
2487
2488 /*
2489 * Part 1:
2490 * Setup VLAN HW tagging
2491 */
2492 ixgbe_setup_vlan_hw_tagging(adapter);
2493
2494 /*
2495 * Part 2:
2496 * Setup VLAN HW filter
2497 */
2498 /* Cleanup shadow_vfta */
2499 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2500 adapter->shadow_vfta[i] = 0;
2501 /* Generate shadow_vfta from ec_vids */
2502 ETHER_LOCK(ec);
2503 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2504 uint32_t idx;
2505
2506 idx = vlanidp->vid / 32;
2507 KASSERT(idx < IXGBE_VFTA_SIZE);
2508 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2509 }
2510 ETHER_UNLOCK(ec);
2511 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2512 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2513
2514 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2515 /* Enable the Filter Table if enabled */
2516 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2517 ctrl |= IXGBE_VLNCTRL_VFE;
2518 else
2519 ctrl &= ~IXGBE_VLNCTRL_VFE;
2520 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2521 } /* ixgbe_setup_vlan_hw_support */
2522
2523 /************************************************************************
2524 * ixgbe_get_slot_info
2525 *
2526 * Get the width and transaction speed of
2527 * the slot this adapter is plugged into.
2528 ************************************************************************/
2529 static void
2530 ixgbe_get_slot_info(struct adapter *adapter)
2531 {
2532 device_t dev = adapter->dev;
2533 struct ixgbe_hw *hw = &adapter->hw;
2534 u32 offset;
2535 u16 link;
2536 int bus_info_valid = TRUE;
2537
2538 /* Some devices are behind an internal bridge */
2539 switch (hw->device_id) {
2540 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2541 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2542 goto get_parent_info;
2543 default:
2544 break;
2545 }
2546
2547 ixgbe_get_bus_info(hw);
2548
2549 /*
2550 * Some devices don't use PCI-E, but there is no need
2551 * to display "Unknown" for bus speed and width.
2552 */
2553 switch (hw->mac.type) {
2554 case ixgbe_mac_X550EM_x:
2555 case ixgbe_mac_X550EM_a:
2556 return;
2557 default:
2558 goto display;
2559 }
2560
2561 get_parent_info:
2562 /*
2563 * For the Quad port adapter we need to parse back
2564 * up the PCI tree to find the speed of the expansion
2565 * slot into which this adapter is plugged. A bit more work.
2566 */
2567 dev = device_parent(device_parent(dev));
2568 #if 0
2569 #ifdef IXGBE_DEBUG
2570 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2571 pci_get_slot(dev), pci_get_function(dev));
2572 #endif
2573 dev = device_parent(device_parent(dev));
2574 #ifdef IXGBE_DEBUG
2575 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2576 pci_get_slot(dev), pci_get_function(dev));
2577 #endif
2578 #endif
2579 /* Now get the PCI Express Capabilities offset */
2580 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2581 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2582 /*
2583 * Hmm...can't get PCI-Express capabilities.
2584 * Falling back to default method.
2585 */
2586 bus_info_valid = FALSE;
2587 ixgbe_get_bus_info(hw);
2588 goto display;
2589 }
2590 /* ...and read the Link Status Register */
2591 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2592 offset + PCIE_LCSR) >> 16;
2593 ixgbe_set_pci_config_data_generic(hw, link);
2594
2595 display:
2596 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2597 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2598 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2599 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2600 "Unknown"),
2601 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2602 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2603 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2604 "Unknown"));
2605
2606 if (bus_info_valid) {
2607 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2608 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2609 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2610 device_printf(dev, "PCI-Express bandwidth available"
2611 " for this card\n is not sufficient for"
2612 " optimal performance.\n");
2613 device_printf(dev, "For optimal performance a x8 "
2614 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2615 }
2616 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2617 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2618 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2619 device_printf(dev, "PCI-Express bandwidth available"
2620 " for this card\n is not sufficient for"
2621 " optimal performance.\n");
2622 device_printf(dev, "For optimal performance a x8 "
2623 "PCIE Gen3 slot is required.\n");
2624 }
2625 } else
2626 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2627
2628 return;
2629 } /* ixgbe_get_slot_info */
2630
2631 /************************************************************************
2632 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2633 ************************************************************************/
2634 static inline void
2635 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2636 {
2637 struct ixgbe_hw *hw = &adapter->hw;
2638 struct ix_queue *que = &adapter->queues[vector];
2639 u64 queue = 1ULL << vector;
2640 u32 mask;
2641
2642 mutex_enter(&que->dc_mtx);
2643 if (que->disabled_count > 0 && --que->disabled_count > 0)
2644 goto out;
2645
2646 if (hw->mac.type == ixgbe_mac_82598EB) {
2647 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2648 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2649 } else {
2650 mask = (queue & 0xFFFFFFFF);
2651 if (mask)
2652 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2653 mask = (queue >> 32);
2654 if (mask)
2655 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2656 }
2657 out:
2658 mutex_exit(&que->dc_mtx);
2659 } /* ixgbe_enable_queue */
2660
2661 /************************************************************************
2662 * ixgbe_disable_queue_internal
2663 ************************************************************************/
2664 static inline void
2665 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2666 {
2667 struct ixgbe_hw *hw = &adapter->hw;
2668 struct ix_queue *que = &adapter->queues[vector];
2669 u64 queue = 1ULL << vector;
2670 u32 mask;
2671
2672 mutex_enter(&que->dc_mtx);
2673
2674 if (que->disabled_count > 0) {
2675 if (nestok)
2676 que->disabled_count++;
2677 goto out;
2678 }
2679 que->disabled_count++;
2680
2681 if (hw->mac.type == ixgbe_mac_82598EB) {
2682 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2683 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2684 } else {
2685 mask = (queue & 0xFFFFFFFF);
2686 if (mask)
2687 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2688 mask = (queue >> 32);
2689 if (mask)
2690 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2691 }
2692 out:
2693 mutex_exit(&que->dc_mtx);
2694 } /* ixgbe_disable_queue_internal */
2695
2696 /************************************************************************
2697 * ixgbe_disable_queue
2698 ************************************************************************/
2699 static inline void
2700 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2701 {
2702
2703 ixgbe_disable_queue_internal(adapter, vector, true);
2704 } /* ixgbe_disable_queue */
2705
2706 /************************************************************************
2707 * ixgbe_sched_handle_que - schedule deferred packet processing
2708 ************************************************************************/
2709 static inline void
2710 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2711 {
2712
2713 if (que->txrx_use_workqueue) {
2714 /*
2715 * adapter->que_wq is bound to each CPU instead of
2716 * each NIC queue to reduce workqueue kthread. As we
2717 * should consider about interrupt affinity in this
2718 * function, the workqueue kthread must be WQ_PERCPU.
2719 * If create WQ_PERCPU workqueue kthread for each NIC
2720 * queue, that number of created workqueue kthread is
2721 * (number of used NIC queue) * (number of CPUs) =
2722 * (number of CPUs) ^ 2 most often.
2723 *
2724 * The same NIC queue's interrupts are avoided by
2725 * masking the queue's interrupt. And different
2726 * NIC queue's interrupts use different struct work
2727 * (que->wq_cookie). So, "enqueued flag" to avoid
2728 * twice workqueue_enqueue() is not required .
2729 */
2730 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2731 } else {
2732 softint_schedule(que->que_si);
2733 }
2734 }
2735
2736 /************************************************************************
2737 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2738 ************************************************************************/
2739 static int
2740 ixgbe_msix_que(void *arg)
2741 {
2742 struct ix_queue *que = arg;
2743 struct adapter *adapter = que->adapter;
2744 struct ifnet *ifp = adapter->ifp;
2745 struct tx_ring *txr = que->txr;
2746 struct rx_ring *rxr = que->rxr;
2747 bool more;
2748 u32 newitr = 0;
2749
2750 /* Protect against spurious interrupts */
2751 if ((ifp->if_flags & IFF_RUNNING) == 0)
2752 return 0;
2753
2754 ixgbe_disable_queue(adapter, que->msix);
2755 ++que->irqs.ev_count;
2756
2757 /*
2758 * Don't change "que->txrx_use_workqueue" from this point to avoid
2759 * flip-flopping softint/workqueue mode in one deferred processing.
2760 */
2761 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2762
2763 #ifdef __NetBSD__
2764 /* Don't run ixgbe_rxeof in interrupt context */
2765 more = true;
2766 #else
2767 more = ixgbe_rxeof(que);
2768 #endif
2769
2770 IXGBE_TX_LOCK(txr);
2771 ixgbe_txeof(txr);
2772 IXGBE_TX_UNLOCK(txr);
2773
2774 /* Do AIM now? */
2775
2776 if (adapter->enable_aim == false)
2777 goto no_calc;
2778 /*
2779 * Do Adaptive Interrupt Moderation:
2780 * - Write out last calculated setting
2781 * - Calculate based on average size over
2782 * the last interval.
2783 */
2784 if (que->eitr_setting)
2785 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2786
2787 que->eitr_setting = 0;
2788
2789 /* Idle, do nothing */
2790 if ((txr->bytes == 0) && (rxr->bytes == 0))
2791 goto no_calc;
2792
2793 if ((txr->bytes) && (txr->packets))
2794 newitr = txr->bytes/txr->packets;
2795 if ((rxr->bytes) && (rxr->packets))
2796 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2797 newitr += 24; /* account for hardware frame, crc */
2798
2799 /* set an upper boundary */
2800 newitr = uimin(newitr, 3000);
2801
2802 /* Be nice to the mid range */
2803 if ((newitr > 300) && (newitr < 1200))
2804 newitr = (newitr / 3);
2805 else
2806 newitr = (newitr / 2);
2807
2808 /*
2809 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2810 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2811 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2812 * on 1G and higher.
2813 */
2814 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2815 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2816 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2817 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2818 }
2819
2820 /* save for next interrupt */
2821 que->eitr_setting = newitr;
2822
2823 /* Reset state */
2824 txr->bytes = 0;
2825 txr->packets = 0;
2826 rxr->bytes = 0;
2827 rxr->packets = 0;
2828
2829 no_calc:
2830 if (more)
2831 ixgbe_sched_handle_que(adapter, que);
2832 else
2833 ixgbe_enable_queue(adapter, que->msix);
2834
2835 return 1;
2836 } /* ixgbe_msix_que */
2837
2838 /************************************************************************
2839 * ixgbe_media_status - Media Ioctl callback
2840 *
2841 * Called whenever the user queries the status of
2842 * the interface using ifconfig.
2843 ************************************************************************/
2844 static void
2845 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2846 {
2847 struct adapter *adapter = ifp->if_softc;
2848 struct ixgbe_hw *hw = &adapter->hw;
2849 int layer;
2850
2851 INIT_DEBUGOUT("ixgbe_media_status: begin");
2852 ixgbe_update_link_status(adapter);
2853
2854 ifmr->ifm_status = IFM_AVALID;
2855 ifmr->ifm_active = IFM_ETHER;
2856
2857 if (adapter->link_active != LINK_STATE_UP) {
2858 ifmr->ifm_active |= IFM_NONE;
2859 return;
2860 }
2861
2862 ifmr->ifm_status |= IFM_ACTIVE;
2863 layer = adapter->phy_layer;
2864
2865 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2866 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2867 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2868 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2869 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2870 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2871 switch (adapter->link_speed) {
2872 case IXGBE_LINK_SPEED_10GB_FULL:
2873 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2874 break;
2875 case IXGBE_LINK_SPEED_5GB_FULL:
2876 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2877 break;
2878 case IXGBE_LINK_SPEED_2_5GB_FULL:
2879 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2880 break;
2881 case IXGBE_LINK_SPEED_1GB_FULL:
2882 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2883 break;
2884 case IXGBE_LINK_SPEED_100_FULL:
2885 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2886 break;
2887 case IXGBE_LINK_SPEED_10_FULL:
2888 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2889 break;
2890 }
2891 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2892 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2893 switch (adapter->link_speed) {
2894 case IXGBE_LINK_SPEED_10GB_FULL:
2895 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2896 break;
2897 }
2898 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2899 switch (adapter->link_speed) {
2900 case IXGBE_LINK_SPEED_10GB_FULL:
2901 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2902 break;
2903 case IXGBE_LINK_SPEED_1GB_FULL:
2904 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2905 break;
2906 }
2907 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2908 switch (adapter->link_speed) {
2909 case IXGBE_LINK_SPEED_10GB_FULL:
2910 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2911 break;
2912 case IXGBE_LINK_SPEED_1GB_FULL:
2913 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2914 break;
2915 }
2916 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2917 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2918 switch (adapter->link_speed) {
2919 case IXGBE_LINK_SPEED_10GB_FULL:
2920 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2921 break;
2922 case IXGBE_LINK_SPEED_1GB_FULL:
2923 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2924 break;
2925 }
2926 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2927 switch (adapter->link_speed) {
2928 case IXGBE_LINK_SPEED_10GB_FULL:
2929 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2930 break;
2931 }
2932 /*
2933 * XXX: These need to use the proper media types once
2934 * they're added.
2935 */
2936 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2937 switch (adapter->link_speed) {
2938 case IXGBE_LINK_SPEED_10GB_FULL:
2939 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2940 break;
2941 case IXGBE_LINK_SPEED_2_5GB_FULL:
2942 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2943 break;
2944 case IXGBE_LINK_SPEED_1GB_FULL:
2945 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2946 break;
2947 }
2948 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2949 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2950 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2951 switch (adapter->link_speed) {
2952 case IXGBE_LINK_SPEED_10GB_FULL:
2953 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2954 break;
2955 case IXGBE_LINK_SPEED_2_5GB_FULL:
2956 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2957 break;
2958 case IXGBE_LINK_SPEED_1GB_FULL:
2959 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2960 break;
2961 }
2962
2963 /* If nothing is recognized... */
2964 #if 0
2965 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2966 ifmr->ifm_active |= IFM_UNKNOWN;
2967 #endif
2968
2969 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2970
2971 /* Display current flow control setting used on link */
2972 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2973 hw->fc.current_mode == ixgbe_fc_full)
2974 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2975 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2976 hw->fc.current_mode == ixgbe_fc_full)
2977 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2978
2979 return;
2980 } /* ixgbe_media_status */
2981
2982 /************************************************************************
2983 * ixgbe_media_change - Media Ioctl callback
2984 *
2985 * Called when the user changes speed/duplex using
2986 * media/mediopt option with ifconfig.
2987 ************************************************************************/
2988 static int
2989 ixgbe_media_change(struct ifnet *ifp)
2990 {
2991 struct adapter *adapter = ifp->if_softc;
2992 struct ifmedia *ifm = &adapter->media;
2993 struct ixgbe_hw *hw = &adapter->hw;
2994 ixgbe_link_speed speed = 0;
2995 ixgbe_link_speed link_caps = 0;
2996 bool negotiate = false;
2997 s32 err = IXGBE_NOT_IMPLEMENTED;
2998
2999 INIT_DEBUGOUT("ixgbe_media_change: begin");
3000
3001 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3002 return (EINVAL);
3003
3004 if (hw->phy.media_type == ixgbe_media_type_backplane)
3005 return (EPERM);
3006
3007 /*
3008 * We don't actually need to check against the supported
3009 * media types of the adapter; ifmedia will take care of
3010 * that for us.
3011 */
3012 switch (IFM_SUBTYPE(ifm->ifm_media)) {
3013 case IFM_AUTO:
3014 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
3015 &negotiate);
3016 if (err != IXGBE_SUCCESS) {
3017 device_printf(adapter->dev, "Unable to determine "
3018 "supported advertise speeds\n");
3019 return (ENODEV);
3020 }
3021 speed |= link_caps;
3022 break;
3023 case IFM_10G_T:
3024 case IFM_10G_LRM:
3025 case IFM_10G_LR:
3026 case IFM_10G_TWINAX:
3027 case IFM_10G_SR:
3028 case IFM_10G_CX4:
3029 case IFM_10G_KR:
3030 case IFM_10G_KX4:
3031 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3032 break;
3033 case IFM_5000_T:
3034 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3035 break;
3036 case IFM_2500_T:
3037 case IFM_2500_KX:
3038 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3039 break;
3040 case IFM_1000_T:
3041 case IFM_1000_LX:
3042 case IFM_1000_SX:
3043 case IFM_1000_KX:
3044 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3045 break;
3046 case IFM_100_TX:
3047 speed |= IXGBE_LINK_SPEED_100_FULL;
3048 break;
3049 case IFM_10_T:
3050 speed |= IXGBE_LINK_SPEED_10_FULL;
3051 break;
3052 case IFM_NONE:
3053 break;
3054 default:
3055 goto invalid;
3056 }
3057
3058 hw->mac.autotry_restart = TRUE;
3059 hw->mac.ops.setup_link(hw, speed, TRUE);
3060 adapter->advertise = 0;
3061 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3062 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3063 adapter->advertise |= 1 << 2;
3064 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3065 adapter->advertise |= 1 << 1;
3066 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3067 adapter->advertise |= 1 << 0;
3068 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3069 adapter->advertise |= 1 << 3;
3070 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3071 adapter->advertise |= 1 << 4;
3072 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3073 adapter->advertise |= 1 << 5;
3074 }
3075
3076 return (0);
3077
3078 invalid:
3079 device_printf(adapter->dev, "Invalid media type!\n");
3080
3081 return (EINVAL);
3082 } /* ixgbe_media_change */
3083
3084 /************************************************************************
3085 * ixgbe_msix_admin - Link status change ISR (MSI/MSI-X)
3086 ************************************************************************/
3087 static int
3088 ixgbe_msix_admin(void *arg)
3089 {
3090 struct adapter *adapter = arg;
3091 struct ixgbe_hw *hw = &adapter->hw;
3092 u32 eicr, eicr_mask;
3093 u32 eims_orig;
3094 u32 eims_disable = 0;
3095 u32 task_requests = 0;
3096 s32 retval;
3097
3098 ++adapter->admin_irqev.ev_count;
3099
3100 eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
3101 /* Pause other interrupts */
3102 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK);
3103
3104 /*
3105 * First get the cause.
3106 *
3107 * The specifications of 82598, 82599, X540 and X550 say EICS register
3108 * is write only. However, Linux says it is a workaround for silicon
3109 * errata to read EICS instead of EICR to get interrupt cause.
3110 * At least, reading EICR clears lower 16bits of EIMS on 82598.
3111 */
3112 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3113 /* Be sure the queue bits are not cleared */
3114 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3115 /* Clear all OTHER interrupts with write */
3116 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3117
3118 /* Link status change */
3119 if (eicr & IXGBE_EICR_LSC) {
3120 task_requests |= IXGBE_REQUEST_TASK_LSC;
3121 eims_disable |= IXGBE_EIMS_LSC;
3122 }
3123
3124 if (ixgbe_is_sfp(hw)) {
3125 /* Pluggable optics-related interrupt */
3126 if (hw->mac.type >= ixgbe_mac_X540)
3127 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3128 else
3129 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3130
3131 /*
3132 * An interrupt might not arrive when a module is inserted.
3133 * When an link status change interrupt occurred and the driver
3134 * still regard SFP as unplugged, issue the module softint
3135 * and then issue LSC interrupt.
3136 */
3137 if ((eicr & eicr_mask)
3138 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3139 && (eicr & IXGBE_EICR_LSC))) {
3140 task_requests |= IXGBE_REQUEST_TASK_MOD;
3141 eims_disable |= IXGBE_EIMS_LSC;
3142 }
3143
3144 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3145 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3146 task_requests |= IXGBE_REQUEST_TASK_MSF;
3147 eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3148 }
3149 }
3150
3151 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3152 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3153 (eicr & IXGBE_EICR_FLOW_DIR)) {
3154 /* This is probably overkill :) */
3155 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3156 return 1;
3157 task_requests |= IXGBE_REQUEST_TASK_FDIR;
3158 /* Disable the interrupt */
3159 eims_disable |= IXGBE_EIMS_FLOW_DIR;
3160 }
3161
3162 if (eicr & IXGBE_EICR_ECC) {
3163 device_printf(adapter->dev,
3164 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3165 /* Disable interrupt to prevent log spam */
3166 eims_disable |= IXGBE_EICR_ECC;
3167 }
3168
3169 /* Check for over temp condition */
3170 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3171 switch (adapter->hw.mac.type) {
3172 case ixgbe_mac_X550EM_a:
3173 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3174 break;
3175 /* Disable interrupt to prevent log spam */
3176 eims_disable |= IXGBE_EICR_GPI_SDP0_X550EM_a;
3177
3178 retval = hw->phy.ops.check_overtemp(hw);
3179 if (retval != IXGBE_ERR_OVERTEMP)
3180 break;
3181 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3182 device_printf(adapter->dev, "System shutdown required!\n");
3183 break;
3184 default:
3185 if (!(eicr & IXGBE_EICR_TS))
3186 break;
3187 /* Disable interrupt to prevent log spam */
3188 eims_disable |= IXGBE_EIMS_TS;
3189
3190 retval = hw->phy.ops.check_overtemp(hw);
3191 if (retval != IXGBE_ERR_OVERTEMP)
3192 break;
3193 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3194 device_printf(adapter->dev, "System shutdown required!\n");
3195 break;
3196 }
3197 }
3198
3199 /* Check for VF message */
3200 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3201 (eicr & IXGBE_EICR_MAILBOX)) {
3202 task_requests |= IXGBE_REQUEST_TASK_MBX;
3203 eims_disable |= IXGBE_EIMS_MAILBOX;
3204 }
3205 }
3206
3207 /* Check for fan failure */
3208 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3209 retval = ixgbe_check_fan_failure(adapter, eicr, true);
3210 if (retval == IXGBE_ERR_FAN_FAILURE) {
3211 /* Disable interrupt to prevent log spam */
3212 eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3213 }
3214 }
3215
3216 /* External PHY interrupt */
3217 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3218 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3219 task_requests |= IXGBE_REQUEST_TASK_PHY;
3220 eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
3221 }
3222
3223 if (task_requests != 0) {
3224 mutex_enter(&adapter->admin_mtx);
3225 adapter->task_requests |= task_requests;
3226 ixgbe_schedule_admin_tasklet(adapter);
3227 mutex_exit(&adapter->admin_mtx);
3228 }
3229
3230 /* Re-enable some OTHER interrupts */
3231 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable);
3232
3233 return 1;
3234 } /* ixgbe_msix_admin */
3235
3236 static void
3237 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3238 {
3239
3240 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3241 itr |= itr << 16;
3242 else
3243 itr |= IXGBE_EITR_CNT_WDIS;
3244
3245 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3246 }
3247
3248
3249 /************************************************************************
3250 * ixgbe_sysctl_interrupt_rate_handler
3251 ************************************************************************/
3252 static int
3253 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3254 {
3255 struct sysctlnode node = *rnode;
3256 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3257 struct adapter *adapter;
3258 uint32_t reg, usec, rate;
3259 int error;
3260
3261 if (que == NULL)
3262 return 0;
3263
3264 adapter = que->adapter;
3265 if (ixgbe_fw_recovery_mode_swflag(adapter))
3266 return (EPERM);
3267
3268 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3269 usec = ((reg & 0x0FF8) >> 3);
3270 if (usec > 0)
3271 rate = 500000 / usec;
3272 else
3273 rate = 0;
3274 node.sysctl_data = &rate;
3275 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3276 if (error || newp == NULL)
3277 return error;
3278 reg &= ~0xfff; /* default, no limitation */
3279 if (rate > 0 && rate < 500000) {
3280 if (rate < 1000)
3281 rate = 1000;
3282 reg |= ((4000000 / rate) & 0xff8);
3283 /*
3284 * When RSC is used, ITR interval must be larger than
3285 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3286 * The minimum value is always greater than 2us on 100M
3287 * (and 10M?(not documented)), but it's not on 1G and higher.
3288 */
3289 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3290 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3291 if ((adapter->num_queues > 1)
3292 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3293 return EINVAL;
3294 }
3295 ixgbe_max_interrupt_rate = rate;
3296 } else
3297 ixgbe_max_interrupt_rate = 0;
3298 ixgbe_eitr_write(adapter, que->msix, reg);
3299
3300 return (0);
3301 } /* ixgbe_sysctl_interrupt_rate_handler */
3302
3303 const struct sysctlnode *
3304 ixgbe_sysctl_instance(struct adapter *adapter)
3305 {
3306 const char *dvname;
3307 struct sysctllog **log;
3308 int rc;
3309 const struct sysctlnode *rnode;
3310
3311 if (adapter->sysctltop != NULL)
3312 return adapter->sysctltop;
3313
3314 log = &adapter->sysctllog;
3315 dvname = device_xname(adapter->dev);
3316
3317 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3318 0, CTLTYPE_NODE, dvname,
3319 SYSCTL_DESCR("ixgbe information and settings"),
3320 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3321 goto err;
3322
3323 return rnode;
3324 err:
3325 device_printf(adapter->dev,
3326 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3327 return NULL;
3328 }
3329
3330 /************************************************************************
3331 * ixgbe_add_device_sysctls
3332 ************************************************************************/
3333 static void
3334 ixgbe_add_device_sysctls(struct adapter *adapter)
3335 {
3336 device_t dev = adapter->dev;
3337 struct ixgbe_hw *hw = &adapter->hw;
3338 struct sysctllog **log;
3339 const struct sysctlnode *rnode, *cnode;
3340
3341 log = &adapter->sysctllog;
3342
3343 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3344 aprint_error_dev(dev, "could not create sysctl root\n");
3345 return;
3346 }
3347
3348 if (sysctl_createv(log, 0, &rnode, &cnode,
3349 CTLFLAG_READWRITE, CTLTYPE_INT,
3350 "debug", SYSCTL_DESCR("Debug Info"),
3351 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3352 aprint_error_dev(dev, "could not create sysctl\n");
3353
3354 if (sysctl_createv(log, 0, &rnode, &cnode,
3355 CTLFLAG_READONLY, CTLTYPE_INT,
3356 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3357 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3358 aprint_error_dev(dev, "could not create sysctl\n");
3359
3360 if (sysctl_createv(log, 0, &rnode, &cnode,
3361 CTLFLAG_READONLY, CTLTYPE_INT,
3362 "num_queues", SYSCTL_DESCR("Number of queues"),
3363 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3364 aprint_error_dev(dev, "could not create sysctl\n");
3365
3366 /* Sysctls for all devices */
3367 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3368 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3369 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3370 CTL_EOL) != 0)
3371 aprint_error_dev(dev, "could not create sysctl\n");
3372
3373 adapter->enable_aim = ixgbe_enable_aim;
3374 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3375 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3376 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3377 aprint_error_dev(dev, "could not create sysctl\n");
3378
3379 if (sysctl_createv(log, 0, &rnode, &cnode,
3380 CTLFLAG_READWRITE, CTLTYPE_INT,
3381 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3382 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3383 CTL_EOL) != 0)
3384 aprint_error_dev(dev, "could not create sysctl\n");
3385
3386 /*
3387 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3388 * it causesflip-flopping softint/workqueue mode in one deferred
3389 * processing. Therefore, preempt_disable()/preempt_enable() are
3390 * required in ixgbe_sched_handle_que() to avoid
3391 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3392 * I think changing "que->txrx_use_workqueue" in interrupt handler
3393 * is lighter than doing preempt_disable()/preempt_enable() in every
3394 * ixgbe_sched_handle_que().
3395 */
3396 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3397 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3398 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3399 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3400 aprint_error_dev(dev, "could not create sysctl\n");
3401
3402 #ifdef IXGBE_DEBUG
3403 /* testing sysctls (for all devices) */
3404 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3405 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3406 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3407 CTL_EOL) != 0)
3408 aprint_error_dev(dev, "could not create sysctl\n");
3409
3410 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3411 CTLTYPE_STRING, "print_rss_config",
3412 SYSCTL_DESCR("Prints RSS Configuration"),
3413 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3414 CTL_EOL) != 0)
3415 aprint_error_dev(dev, "could not create sysctl\n");
3416 #endif
3417 /* for X550 series devices */
3418 if (hw->mac.type >= ixgbe_mac_X550)
3419 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3420 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3421 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3422 CTL_EOL) != 0)
3423 aprint_error_dev(dev, "could not create sysctl\n");
3424
3425 /* for WoL-capable devices */
3426 if (adapter->wol_support) {
3427 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3428 CTLTYPE_BOOL, "wol_enable",
3429 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3430 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3431 CTL_EOL) != 0)
3432 aprint_error_dev(dev, "could not create sysctl\n");
3433
3434 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3435 CTLTYPE_INT, "wufc",
3436 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3437 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3438 CTL_EOL) != 0)
3439 aprint_error_dev(dev, "could not create sysctl\n");
3440 }
3441
3442 /* for X552/X557-AT devices */
3443 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3444 const struct sysctlnode *phy_node;
3445
3446 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3447 "phy", SYSCTL_DESCR("External PHY sysctls"),
3448 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3449 aprint_error_dev(dev, "could not create sysctl\n");
3450 return;
3451 }
3452
3453 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3454 CTLTYPE_INT, "temp",
3455 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3456 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3457 CTL_EOL) != 0)
3458 aprint_error_dev(dev, "could not create sysctl\n");
3459
3460 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3461 CTLTYPE_INT, "overtemp_occurred",
3462 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3463 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3464 CTL_CREATE, CTL_EOL) != 0)
3465 aprint_error_dev(dev, "could not create sysctl\n");
3466 }
3467
3468 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3469 && (hw->phy.type == ixgbe_phy_fw))
3470 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3471 CTLTYPE_BOOL, "force_10_100_autonego",
3472 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3473 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3474 CTL_CREATE, CTL_EOL) != 0)
3475 aprint_error_dev(dev, "could not create sysctl\n");
3476
3477 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3478 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3479 CTLTYPE_INT, "eee_state",
3480 SYSCTL_DESCR("EEE Power Save State"),
3481 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3482 CTL_EOL) != 0)
3483 aprint_error_dev(dev, "could not create sysctl\n");
3484 }
3485 } /* ixgbe_add_device_sysctls */
3486
3487 /************************************************************************
3488 * ixgbe_allocate_pci_resources
3489 ************************************************************************/
3490 static int
3491 ixgbe_allocate_pci_resources(struct adapter *adapter,
3492 const struct pci_attach_args *pa)
3493 {
3494 pcireg_t memtype, csr;
3495 device_t dev = adapter->dev;
3496 bus_addr_t addr;
3497 int flags;
3498
3499 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3500 switch (memtype) {
3501 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3502 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3503 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3504 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3505 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3506 goto map_err;
3507 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3508 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3509 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3510 }
3511 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3512 adapter->osdep.mem_size, flags,
3513 &adapter->osdep.mem_bus_space_handle) != 0) {
3514 map_err:
3515 adapter->osdep.mem_size = 0;
3516 aprint_error_dev(dev, "unable to map BAR0\n");
3517 return ENXIO;
3518 }
3519 /*
3520 * Enable address decoding for memory range in case BIOS or
3521 * UEFI don't set it.
3522 */
3523 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3524 PCI_COMMAND_STATUS_REG);
3525 csr |= PCI_COMMAND_MEM_ENABLE;
3526 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3527 csr);
3528 break;
3529 default:
3530 aprint_error_dev(dev, "unexpected type on BAR0\n");
3531 return ENXIO;
3532 }
3533
3534 return (0);
3535 } /* ixgbe_allocate_pci_resources */
3536
3537 static void
3538 ixgbe_free_deferred_handlers(struct adapter *adapter)
3539 {
3540 struct ix_queue *que = adapter->queues;
3541 struct tx_ring *txr = adapter->tx_rings;
3542 int i;
3543
3544 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3545 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3546 if (txr->txr_si != NULL)
3547 softint_disestablish(txr->txr_si);
3548 }
3549 if (que->que_si != NULL)
3550 softint_disestablish(que->que_si);
3551 }
3552 if (adapter->txr_wq != NULL)
3553 workqueue_destroy(adapter->txr_wq);
3554 if (adapter->txr_wq_enqueued != NULL)
3555 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3556 if (adapter->que_wq != NULL)
3557 workqueue_destroy(adapter->que_wq);
3558
3559 if (adapter->admin_wq != NULL) {
3560 workqueue_destroy(adapter->admin_wq);
3561 adapter->admin_wq = NULL;
3562 }
3563 if (adapter->timer_wq != NULL) {
3564 workqueue_destroy(adapter->timer_wq);
3565 adapter->timer_wq = NULL;
3566 }
3567 if (adapter->recovery_mode_timer_wq != NULL) {
3568 /*
3569 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3570 * the recovery_mode_timer workqueue, so call it here.
3571 */
3572 workqueue_wait(adapter->recovery_mode_timer_wq,
3573 &adapter->recovery_mode_timer_wc);
3574 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3575 workqueue_destroy(adapter->recovery_mode_timer_wq);
3576 adapter->recovery_mode_timer_wq = NULL;
3577 }
3578 } /* ixgbe_free_deferred_handlers */
3579
3580 /************************************************************************
3581 * ixgbe_detach - Device removal routine
3582 *
3583 * Called when the driver is being removed.
3584 * Stops the adapter and deallocates all the resources
3585 * that were allocated for driver operation.
3586 *
3587 * return 0 on success, positive on failure
3588 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing was set up if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while VFs are still bound to this PF. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	/*
	 * Mark the softc as detaching so deferred work (callouts,
	 * workqueue items) stops rescheduling itself.
	 */
	adapter->osdep.detaching = true;
	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls
	 * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop()
	 * directly.
	 */
	ixgbe_setup_low_power_mode(adapter);

	/* Wait for in-flight callouts to finish; they will not re-arm. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	/* Drain pending workqueue items and clear their pending latches. */
	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Destroy the workqueues (admin/timer/recovery) themselves. */
	ixgbe_free_deferred_handlers(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	/* Tear down the sysctl tree and every event counter we attached. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);
	evcnt_detach(&adapter->mod_workev);
	evcnt_detach(&adapter->msf_workev);
	evcnt_detach(&adapter->phy_workev);

	/*
	 * Per-TC counters. rnbc exists only on 82598EB; pxon2offc only
	 * on 82599 and newer (mirrors the attach-side registration).
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters: queue, TX ring, RX ring and HW queue stats. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	mutex_destroy(&adapter->admin_mtx); /* XXX appropriate order? */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3783
3784 /************************************************************************
3785 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3786 *
3787 * Prepare the adapter/port for LPLU and/or WoL
3788 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ifnet *ifp = adapter->ifp;
	s32 error = 0;

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/*
		 * Disable PHY resets across the stop so the LPLU state
		 * programmed below is not clobbered.
		 */
		hw->phy.reset_disable = true;
		ixgbe_ifstop(ifp, 1);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_ifstop(ifp, 1);
	}

	IXGBE_CORE_LOCK(adapter);

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	IXGBE_CORE_UNLOCK(adapter);

	/* 0 on success, otherwise the enter_lplu() error code. */
	return error;
} /* ixgbe_setup_low_power_mode */
3846
3847 /************************************************************************
3848 * ixgbe_shutdown - Shutdown entry point
3849 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * Disabled FreeBSD-style shutdown method, kept for reference; NetBSD
 * handles shutdown/suspend through pmf(9) instead (see ixgbe_suspend).
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	error = ixgbe_setup_low_power_mode(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3864
3865 /************************************************************************
3866 * ixgbe_suspend
3867 *
3868 * From D0 to D3
3869 ************************************************************************/
3870 static bool
3871 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3872 {
3873 struct adapter *adapter = device_private(dev);
3874 int error = 0;
3875
3876 INIT_DEBUGOUT("ixgbe_suspend: begin");
3877
3878 error = ixgbe_setup_low_power_mode(adapter);
3879
3880 return (error);
3881 } /* ixgbe_suspend */
3882
3883 /************************************************************************
3884 * ixgbe_resume
3885 *
3886 * From D3 to D0
3887 ************************************************************************/
3888 static bool
3889 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3890 {
3891 struct adapter *adapter = device_private(dev);
3892 struct ifnet *ifp = adapter->ifp;
3893 struct ixgbe_hw *hw = &adapter->hw;
3894 u32 wus;
3895
3896 INIT_DEBUGOUT("ixgbe_resume: begin");
3897
3898 IXGBE_CORE_LOCK(adapter);
3899
3900 /* Read & clear WUS register */
3901 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3902 if (wus)
3903 device_printf(dev, "Woken up by (WUS): %#010x\n",
3904 IXGBE_READ_REG(hw, IXGBE_WUS));
3905 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3906 /* And clear WUFC until next low-power transition */
3907 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3908
3909 /*
3910 * Required after D3->D0 transition;
3911 * will re-advertise all previous advertised speeds
3912 */
3913 if (ifp->if_flags & IFF_UP)
3914 ixgbe_init_locked(adapter);
3915
3916 IXGBE_CORE_UNLOCK(adapter);
3917
3918 return true;
3919 } /* ixgbe_resume */
3920
3921 /*
3922 * Set the various hardware offload abilities.
3923 *
3924 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3925 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3926 * mbuf offload flags the driver will understand.
3927 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty on NetBSD: there is no if_hwassist
	 * equivalent to populate here (see the comment above); offload
	 * capabilities are handled elsewhere in the port.
	 */
}
3933
3934 /************************************************************************
3935 * ixgbe_init_locked - Init entry point
3936 *
3937 * Used in two ways: It is used by the stack as an init
3938 * entry point in network interface structure. It is also
3939 * used by the driver as a hw/sw initialization routine to
3940 * get to a consistent state.
3941 *
3942 * return 0 on success, positive on failure
3943 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and any timers before reprogramming. */
	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_stop(&adapter->recovery_mode_timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to ~10ms for the queue to report enabled. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Re-arm the periodic timers stopped above. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4206
4207 /************************************************************************
4208 * ixgbe_init
4209 ************************************************************************/
4210 static int
4211 ixgbe_init(struct ifnet *ifp)
4212 {
4213 struct adapter *adapter = ifp->if_softc;
4214
4215 IXGBE_CORE_LOCK(adapter);
4216 ixgbe_init_locked(adapter);
4217 IXGBE_CORE_UNLOCK(adapter);
4218
4219 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4220 } /* ixgbe_init */
4221
4222 /************************************************************************
4223 * ixgbe_set_ivar
4224 *
4225 * Setup the correct IVAR register for a particular MSI-X interrupt
4226 * (yes this is all very magic and confusing :)
4227 * - entry is the register array entry
4228 * - vector is the MSI-X vector for this queue
4229 * - type is RX/TX/MISC
4230 ************************************************************************/
4231 static void
4232 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4233 {
4234 struct ixgbe_hw *hw = &adapter->hw;
4235 u32 ivar, index;
4236
4237 vector |= IXGBE_IVAR_ALLOC_VAL;
4238
4239 switch (hw->mac.type) {
4240 case ixgbe_mac_82598EB:
4241 if (type == -1)
4242 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4243 else
4244 entry += (type * 64);
4245 index = (entry >> 2) & 0x1F;
4246 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4247 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4248 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4249 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4250 break;
4251 case ixgbe_mac_82599EB:
4252 case ixgbe_mac_X540:
4253 case ixgbe_mac_X550:
4254 case ixgbe_mac_X550EM_x:
4255 case ixgbe_mac_X550EM_a:
4256 if (type == -1) { /* MISC IVAR */
4257 index = (entry & 1) * 8;
4258 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4259 ivar &= ~(0xffUL << index);
4260 ivar |= ((u32)vector << index);
4261 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4262 } else { /* RX/TX IVARS */
4263 index = (16 * (entry & 1)) + (8 * type);
4264 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4265 ivar &= ~(0xffUL << index);
4266 ivar |= ((u32)vector << index);
4267 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4268 }
4269 break;
4270 default:
4271 break;
4272 }
4273 } /* ixgbe_set_ivar */
4274
4275 /************************************************************************
4276 * ixgbe_configure_ivars
4277 ************************************************************************/
4278 static void
4279 ixgbe_configure_ivars(struct adapter *adapter)
4280 {
4281 struct ix_queue *que = adapter->queues;
4282 u32 newitr;
4283
4284 if (ixgbe_max_interrupt_rate > 0)
4285 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4286 else {
4287 /*
4288 * Disable DMA coalescing if interrupt moderation is
4289 * disabled.
4290 */
4291 adapter->dmac = 0;
4292 newitr = 0;
4293 }
4294
4295 for (int i = 0; i < adapter->num_queues; i++, que++) {
4296 struct rx_ring *rxr = &adapter->rx_rings[i];
4297 struct tx_ring *txr = &adapter->tx_rings[i];
4298 /* First the RX queue entry */
4299 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4300 /* ... and the TX */
4301 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4302 /* Set an Initial EITR value */
4303 ixgbe_eitr_write(adapter, que->msix, newitr);
4304 /*
4305 * To eliminate influence of the previous state.
4306 * At this point, Tx/Rx interrupt handler
4307 * (ixgbe_msix_que()) cannot be called, so both
4308 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4309 */
4310 que->eitr_setting = 0;
4311 }
4312
4313 /* For the Link interrupt */
4314 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4315 } /* ixgbe_configure_ivars */
4316
4317 /************************************************************************
4318 * ixgbe_config_gpie
4319 ************************************************************************/
4320 static void
4321 ixgbe_config_gpie(struct adapter *adapter)
4322 {
4323 struct ixgbe_hw *hw = &adapter->hw;
4324 u32 gpie;
4325
4326 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4327
4328 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4329 /* Enable Enhanced MSI-X mode */
4330 gpie |= IXGBE_GPIE_MSIX_MODE
4331 | IXGBE_GPIE_EIAME
4332 | IXGBE_GPIE_PBA_SUPPORT
4333 | IXGBE_GPIE_OCD;
4334 }
4335
4336 /* Fan Failure Interrupt */
4337 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4338 gpie |= IXGBE_SDP1_GPIEN;
4339
4340 /* Thermal Sensor Interrupt */
4341 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4342 gpie |= IXGBE_SDP0_GPIEN_X540;
4343
4344 /* Link detection */
4345 switch (hw->mac.type) {
4346 case ixgbe_mac_82599EB:
4347 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4348 break;
4349 case ixgbe_mac_X550EM_x:
4350 case ixgbe_mac_X550EM_a:
4351 gpie |= IXGBE_SDP0_GPIEN_X540;
4352 break;
4353 default:
4354 break;
4355 }
4356
4357 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4358
4359 } /* ixgbe_config_gpie */
4360
4361 /************************************************************************
4362 * ixgbe_config_delay_values
4363 *
4364 * Requires adapter->max_frame_size to be set.
4365 ************************************************************************/
4366 static void
4367 ixgbe_config_delay_values(struct adapter *adapter)
4368 {
4369 struct ixgbe_hw *hw = &adapter->hw;
4370 u32 rxpb, frame, size, tmp;
4371
4372 frame = adapter->max_frame_size;
4373
4374 /* Calculate High Water */
4375 switch (hw->mac.type) {
4376 case ixgbe_mac_X540:
4377 case ixgbe_mac_X550:
4378 case ixgbe_mac_X550EM_x:
4379 case ixgbe_mac_X550EM_a:
4380 tmp = IXGBE_DV_X540(frame, frame);
4381 break;
4382 default:
4383 tmp = IXGBE_DV(frame, frame);
4384 break;
4385 }
4386 size = IXGBE_BT2KB(tmp);
4387 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4388 hw->fc.high_water[0] = rxpb - size;
4389
4390 /* Now calculate Low Water */
4391 switch (hw->mac.type) {
4392 case ixgbe_mac_X540:
4393 case ixgbe_mac_X550:
4394 case ixgbe_mac_X550EM_x:
4395 case ixgbe_mac_X550EM_a:
4396 tmp = IXGBE_LOW_DV_X540(frame);
4397 break;
4398 default:
4399 tmp = IXGBE_LOW_DV(frame);
4400 break;
4401 }
4402 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4403
4404 hw->fc.pause_time = IXGBE_FC_PAUSE;
4405 hw->fc.send_xon = TRUE;
4406 } /* ixgbe_config_delay_values */
4407
4408 /************************************************************************
4409 * ixgbe_set_rxfilter - Multicast Update
4410 *
4411 * Called whenever multicast address list is updated.
4412 ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast address array from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI if the table overflows or if any
		 * entry is a range (addrlo != addrhi), which the filter
		 * cannot represent.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Program promiscuous / all-multicast bits in FCTRL. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/* Drop the ethercom lock before touching hardware tables. */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
4468
4469 /************************************************************************
4470 * ixgbe_mc_array_itr
4471 *
4472 * An iterator function needed by the multicast shared code.
4473 * It feeds the shared code routine the addresses in the
4474 * array of ixgbe_set_rxfilter() one by one.
4475 ************************************************************************/
4476 static u8 *
4477 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4478 {
4479 struct ixgbe_mc_addr *mta;
4480
4481 mta = (struct ixgbe_mc_addr *)*update_ptr;
4482 *vmdq = mta->vmdq;
4483
4484 *update_ptr = (u8*)(mta + 1);
4485
4486 return (mta->addr);
4487 } /* ixgbe_mc_array_itr */
4488
4489 /************************************************************************
4490 * ixgbe_local_timer - Timer routine
4491 *
4492 * Checks for link status, updates statistics,
4493 * and runs the watchdog check.
4494 ************************************************************************/
4495 static void
4496 ixgbe_local_timer(void *arg)
4497 {
4498 struct adapter *adapter = arg;
4499
4500 if (adapter->schedule_wqs_ok) {
4501 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4502 workqueue_enqueue(adapter->timer_wq,
4503 &adapter->timer_wc, NULL);
4504 }
4505 }
4506
static void
ixgbe_handle_timer(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	IXGBE_CORE_LOCK(adapter);

	/* Check for pluggable optics */
	if (ixgbe_is_sfp(hw)) {
		bool sched_mod_task = false;

		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
			 * any GPIO(SDP). So just schedule TASK_MOD.
			 */
			sched_mod_task = true;
		} else {
			bool was_full, is_full;

			was_full =
			    hw->phy.sfp_type != ixgbe_sfp_type_not_present;
			is_full = ixgbe_sfp_cage_full(hw);

			/* Do probe if cage state changed */
			if (was_full ^ is_full)
				sched_mod_task = true;
		}
		if (sched_mod_task) {
			mutex_enter(&adapter->admin_mtx);
			adapter->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
			ixgbe_schedule_admin_tasklet(adapter);
			mutex_exit(&adapter->admin_mtx);
		}
	}

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters: sum the per-queue TX error tallies */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

	/*
	 * Clear the pending latch before re-arming the callout so the
	 * next tick can enqueue work again.
	 */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface (ixgbe_init_locked re-arms
	 * the callout itself, so no callout_reset() on this path). */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_timer */
4638
4639 /************************************************************************
4640 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4641 ************************************************************************/
4642 static void
4643 ixgbe_recovery_mode_timer(void *arg)
4644 {
4645 struct adapter *adapter = arg;
4646
4647 if (__predict_true(adapter->osdep.detaching == false)) {
4648 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending,
4649 0, 1) == 0) {
4650 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4651 &adapter->recovery_mode_timer_wc, NULL);
4652 }
4653 }
4654 }
4655
4656 static void
4657 ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4658 {
4659 struct adapter *adapter = context;
4660 struct ixgbe_hw *hw = &adapter->hw;
4661
4662 IXGBE_CORE_LOCK(adapter);
4663 if (ixgbe_fw_recovery_mode(hw)) {
4664 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4665 /* Firmware error detected, entering recovery mode */
4666 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4667
4668 if (hw->adapter_stopped == FALSE)
4669 ixgbe_stop_locked(adapter);
4670 }
4671 } else
4672 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4673
4674 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4675 callout_reset(&adapter->recovery_mode_timer, hz,
4676 ixgbe_recovery_mode_timer, adapter);
4677 IXGBE_CORE_UNLOCK(adapter);
4678 } /* ixgbe_handle_recovery_mode_timer */
4679
4680 /************************************************************************
4681 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4682 * bool int_en: true if it's called when the interrupt is enabled.
4683 ************************************************************************/
static void
ixgbe_handle_mod(void *context, bool int_en)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	enum ixgbe_sfp_type last_sfp_type;
	u32 err;
	bool last_unsupported_sfp_recovery;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Snapshot pre-identify state so transitions can be detected below. */
	last_sfp_type = hw->phy.sfp_type;
	last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
	++adapter->mod_workev.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/* On non-82598, skip identify if the SFP cage reports empty. */
		if ((hw->mac.type != ixgbe_mac_82598EB) &&
		    !ixgbe_sfp_cage_full(hw))
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		/* Warn once on first detection, not on every module event. */
		if (last_unsupported_sfp_recovery == false)
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
	    (hw->phy.sfp_type != last_sfp_type)) {
		/* A module is inserted and changed. */

		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}

out:
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * Adjust media types shown in ifconfig.
	 * The core lock is dropped here because ifmedia functions take
	 * their own lock (see the comment in ixgbe_handle_admin()).
	 */
	IXGBE_CORE_UNLOCK(adapter);
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_LOCK(adapter);

	/*
	 * Don't schedule MSF event if the chip is 82598. 82598 doesn't support
	 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
	 * flap because the function calls setup_link().
	 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		mutex_enter(&adapter->admin_mtx);
		/* int_en selects the with/without-interrupt-enable variant */
		if (int_en)
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
		else
			adapter->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
		mutex_exit(&adapter->admin_mtx);
	}

	/*
	 * Don't call ixgbe_schedule_admin_tasklet() because we are on
	 * the workqueue now.
	 */
} /* ixgbe_handle_mod */
4770
4771
4772 /************************************************************************
4773 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4774 ************************************************************************/
4775 static void
4776 ixgbe_handle_msf(void *context)
4777 {
4778 struct adapter *adapter = context;
4779 struct ixgbe_hw *hw = &adapter->hw;
4780 u32 autoneg;
4781 bool negotiate;
4782
4783 KASSERT(mutex_owned(&adapter->core_mtx));
4784
4785 ++adapter->msf_workev.ev_count;
4786
4787 autoneg = hw->phy.autoneg_advertised;
4788 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4789 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4790 if (hw->mac.ops.setup_link)
4791 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4792 } /* ixgbe_handle_msf */
4793
4794 /************************************************************************
4795 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4796 ************************************************************************/
4797 static void
4798 ixgbe_handle_phy(void *context)
4799 {
4800 struct adapter *adapter = context;
4801 struct ixgbe_hw *hw = &adapter->hw;
4802 int error;
4803
4804 KASSERT(mutex_owned(&adapter->core_mtx));
4805
4806 ++adapter->phy_workev.ev_count;
4807 error = hw->phy.ops.handle_lasi(hw);
4808 if (error == IXGBE_ERR_OVERTEMP)
4809 device_printf(adapter->dev,
4810 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4811 " PHY will downshift to lower power state!\n");
4812 else if (error)
4813 device_printf(adapter->dev,
4814 "Error handling LASI interrupt: %d\n", error);
4815 } /* ixgbe_handle_phy */
4816
/*
 * Workqueue handler that dispatches all deferred "admin" work (link state,
 * SFP module, MSF, external PHY, flow director). Interrupt handlers mask
 * the corresponding EIMS bits and record requests in adapter->task_requests;
 * this function consumes those requests and re-enables exactly the EIMS bits
 * belonging to the work it performed.
 */
static void
ixgbe_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 task_requests;
	u32 eims_enable = 0;

	/* Atomically take ownership of all pending requests. */
	mutex_enter(&adapter->admin_mtx);
	adapter->admin_pending = 0;
	task_requests = adapter->task_requests;
	adapter->task_requests = 0;
	mutex_exit(&adapter->admin_mtx);

	/*
	 * Hold the IFNET_LOCK across this entire call. This will
	 * prevent additional changes to adapter->phy_layer
	 * and serialize calls to this tasklet. We cannot hold the
	 * CORE_LOCK while calling into the ifmedia functions as
	 * they call ifmedia_lock() and the lock is CORE_LOCK.
	 */
	IFNET_LOCK(ifp);
	IXGBE_CORE_LOCK(adapter);
	if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) {
		ixgbe_handle_link(adapter);
		eims_enable |= IXGBE_EIMS_LSC;
	}
	/* _WOI variant: module event with interrupt re-enable suppressed. */
	if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) {
		ixgbe_handle_mod(adapter, false);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) {
		ixgbe_handle_mod(adapter, true);
		/* SDP pin used for module presence differs by MAC family. */
		if (hw->mac.type >= ixgbe_mac_X540)
			eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
		else
			eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
	}
	if ((task_requests
	    & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) {
		ixgbe_handle_msf(adapter);
		/* Only the non-WOI MSF request on 82599 re-enables SDP1. */
		if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) &&
		    (hw->mac.type == ixgbe_mac_82599EB))
			eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
	}
	if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) {
		ixgbe_handle_phy(adapter);
		eims_enable |= IXGBE_EICR_GPI_SDP0_X540;
	}
	if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) {
		ixgbe_reinit_fdir(adapter);
		eims_enable |= IXGBE_EIMS_FLOW_DIR;
	}
#if 0 /* notyet */
	if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) {
		ixgbe_handle_mbx(adapter);
		eims_enable |= IXGBE_EIMS_MAILBOX;
	}
#endif
	/* Unmask only the interrupt sources whose work was handled above. */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable);

	IXGBE_CORE_UNLOCK(adapter);
	IFNET_UNLOCK(ifp);
} /* ixgbe_handle_admin */
4881
/*
 * if_stop callback: stop the adapter under the core lock, then wait for
 * any in-flight timer work to finish before clearing its pending flag.
 * The 'disable' argument is unused here.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/*
	 * Drain the timer workqueue *after* dropping the core lock (the
	 * handler takes it), then reset the pending flag for the next init.
	 */
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}
4894
4895 /************************************************************************
4896 * ixgbe_stop_locked - Stop the hardware
4897 *
4898 * Disables all traffic on the adapter by issuing a
4899 * global reset on the MAC and deallocates TX/RX buffers.
4900 ************************************************************************/
static void
ixgbe_stop_locked(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop_locked: begin\n");
	/* Quiesce interrupts and the stats/watchdog timer first. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling ixgbe_stop_adapter() —
	 * presumably so the stop sequence runs even if a previous call
	 * already marked the adapter stopped (TODO confirm against
	 * ixgbe_stop_adapter()).
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop_locked */
4939
4940 /************************************************************************
4941 * ixgbe_update_link_status - Update OS on link state
4942 *
4943 * Note: Only updates the OS on the cached link state.
4944 * The real check of the hardware only happens with
4945 * a link interrupt.
4946 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Act only on a transition to UP, not on every call. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* Tell the stack the link came up. */
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
5033
5034 /************************************************************************
5035 * ixgbe_config_dmac - Configure DMA Coalescing
5036 ************************************************************************/
5037 static void
5038 ixgbe_config_dmac(struct adapter *adapter)
5039 {
5040 struct ixgbe_hw *hw = &adapter->hw;
5041 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5042
5043 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5044 return;
5045
5046 if (dcfg->watchdog_timer ^ adapter->dmac ||
5047 dcfg->link_speed ^ adapter->link_speed) {
5048 dcfg->watchdog_timer = adapter->dmac;
5049 dcfg->fcoe_en = false;
5050 dcfg->link_speed = adapter->link_speed;
5051 dcfg->num_tcs = 1;
5052
5053 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5054 dcfg->watchdog_timer, dcfg->link_speed);
5055
5056 hw->mac.ops.dmac_config(hw);
5057 }
5058 } /* ixgbe_config_dmac */
5059
5060 /************************************************************************
5061 * ixgbe_enable_intr
5062 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the RX/TX queue bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the per-MAC-family auxiliary interrupt sources. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		/*
		 * It's not required to set TCP_TIMER because we don't use
		 * it.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
5141
5142 /************************************************************************
5143 * ixgbe_disable_intr_internal
5144 ************************************************************************/
5145 static void
5146 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5147 {
5148 struct ix_queue *que = adapter->queues;
5149
5150 /* disable interrupts other than queues */
5151 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5152
5153 if (adapter->msix_mem)
5154 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5155
5156 for (int i = 0; i < adapter->num_queues; i++, que++)
5157 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5158
5159 IXGBE_WRITE_FLUSH(&adapter->hw);
5160
5161 } /* ixgbe_do_disable_intr_internal */
5162
5163 /************************************************************************
5164 * ixgbe_disable_intr
5165 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/*
	 * nestok=true: the per-queue disable may nest (semantics live in
	 * ixgbe_disable_queue_internal() — TODO confirm there).
	 */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
5172
5173 /************************************************************************
5174 * ixgbe_ensure_disabled_intr
5175 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/*
	 * Same as ixgbe_disable_intr() but with nestok=false — presumably
	 * forces the disabled state regardless of nesting count; see
	 * ixgbe_disable_queue_internal().
	 */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
5182
5183 /************************************************************************
5184 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5185 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	u32 eicr, eicr_mask;
	u32 eims_orig;		/* EIMS value on entry, restored (modified) on exit */
	u32 eims_enable = 0;	/* bits to force-enable on exit */
	u32 eims_disable = 0;	/* bits to leave masked for deferred work */
	u32 task_requests = 0;	/* admin work to hand to the tasklet */
	s32 retval;

	eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS);
	/*
	 * Silicon errata #26 on 82598. Disable all interrupts before reading
	 * EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Read and clear EICR */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	if (eicr == 0) {
		/* Not ours (shared line): restore EIMS and decline. */
		adapter->stats.pf.intzero.ev_count++;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
		return 0;
	}

	/* Queue (0) intr */
	if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
		++que->irqs.ev_count;

		/*
		 * The same as ixgbe_msix_que() about
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);

		/* Defer RX/remaining TX work; keep queue 0 masked meanwhile. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
		/* Disable queue 0 interrupt */
		eims_disable |= 1UL << 0;

	} else
		eims_enable |= IXGBE_EIMC_RTX_QUEUE;

	/*
	 * Link status change: defer to the admin tasklet, which re-enables
	 * IXGBE_EIMS_LSC when done.
	 */
	if (eicr & IXGBE_EICR_LSC) {
		task_requests |= IXGBE_REQUEST_TASK_LSC;
		eims_disable |= IXGBE_EIMS_LSC;
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 * An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			task_requests |= IXGBE_REQUEST_TASK_MOD;
			eims_disable |= IXGBE_EIMS_LSC;
		}

		/* Multispeed fiber interrupt (82599 only, via SDP1). */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			task_requests |= IXGBE_REQUEST_TASK_MSF;
			eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		retval = ixgbe_check_fan_failure(adapter, eicr, true);
		if (retval == IXGBE_ERR_FAN_FAILURE) {
			/* Disable interrupt to prevent log spam */
			eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		task_requests |= IXGBE_REQUEST_TASK_PHY;
		eims_disable |= IXGBE_EICR_GPI_SDP0_X540;
	}

	/* Hand any deferred work to the admin tasklet. */
	if (task_requests != 0) {
		mutex_enter(&adapter->admin_mtx);
		adapter->task_requests |= task_requests;
		ixgbe_schedule_admin_tasklet(adapter);
		mutex_exit(&adapter->admin_mtx);
	}

	/* Re-enable some interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS,
	    (eims_orig & ~eims_disable) | eims_enable);

	return 1;
} /* ixgbe_legacy_irq */
5305
5306 /************************************************************************
5307 * ixgbe_free_pciintr_resources
5308 ************************************************************************/
5309 static void
5310 ixgbe_free_pciintr_resources(struct adapter *adapter)
5311 {
5312 struct ix_queue *que = adapter->queues;
5313 int rid;
5314
5315 /*
5316 * Release all msix queue resources:
5317 */
5318 for (int i = 0; i < adapter->num_queues; i++, que++) {
5319 if (que->res != NULL) {
5320 pci_intr_disestablish(adapter->osdep.pc,
5321 adapter->osdep.ihs[i]);
5322 adapter->osdep.ihs[i] = NULL;
5323 }
5324 }
5325
5326 /* Clean the Legacy or Link interrupt last */
5327 if (adapter->vector) /* we are doing MSIX */
5328 rid = adapter->vector;
5329 else
5330 rid = 0;
5331
5332 if (adapter->osdep.ihs[rid] != NULL) {
5333 pci_intr_disestablish(adapter->osdep.pc,
5334 adapter->osdep.ihs[rid]);
5335 adapter->osdep.ihs[rid] = NULL;
5336 }
5337
5338 if (adapter->osdep.intrs != NULL) {
5339 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5340 adapter->osdep.nintrs);
5341 adapter->osdep.intrs = NULL;
5342 }
5343 } /* ixgbe_free_pciintr_resources */
5344
5345 /************************************************************************
5346 * ixgbe_free_pci_resources
5347 ************************************************************************/
5348 static void
5349 ixgbe_free_pci_resources(struct adapter *adapter)
5350 {
5351
5352 ixgbe_free_pciintr_resources(adapter);
5353
5354 if (adapter->osdep.mem_size != 0) {
5355 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5356 adapter->osdep.mem_bus_space_handle,
5357 adapter->osdep.mem_size);
5358 }
5359
5360 } /* ixgbe_free_pci_resources */
5361
5362 /************************************************************************
5363 * ixgbe_set_sysctl_value
5364 ************************************************************************/
5365 static void
5366 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5367 const char *description, int *limit, int value)
5368 {
5369 device_t dev = adapter->dev;
5370 struct sysctllog **log;
5371 const struct sysctlnode *rnode, *cnode;
5372
5373 /*
5374 * It's not required to check recovery mode because this function never
5375 * touches hardware.
5376 */
5377
5378 log = &adapter->sysctllog;
5379 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5380 aprint_error_dev(dev, "could not create sysctl root\n");
5381 return;
5382 }
5383 if (sysctl_createv(log, 0, &rnode, &cnode,
5384 CTLFLAG_READWRITE, CTLTYPE_INT,
5385 name, SYSCTL_DESCR(description),
5386 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5387 aprint_error_dev(dev, "could not create sysctl\n");
5388 *limit = value;
5389 } /* ixgbe_set_sysctl_value */
5390
5391 /************************************************************************
5392 * ixgbe_sysctl_flowcntl
5393 *
5394 * SYSCTL wrapper around setting Flow Control
5395 ************************************************************************/
5396 static int
5397 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5398 {
5399 struct sysctlnode node = *rnode;
5400 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5401 int error, fc;
5402
5403 if (ixgbe_fw_recovery_mode_swflag(adapter))
5404 return (EPERM);
5405
5406 fc = adapter->hw.fc.current_mode;
5407 node.sysctl_data = &fc;
5408 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5409 if (error != 0 || newp == NULL)
5410 return error;
5411
5412 /* Don't bother if it's not changed */
5413 if (fc == adapter->hw.fc.current_mode)
5414 return (0);
5415
5416 return ixgbe_set_flowcntl(adapter, fc);
5417 } /* ixgbe_sysctl_flowcntl */
5418
5419 /************************************************************************
5420 * ixgbe_set_flowcntl - Set flow control
5421 *
5422 * Flow control values:
5423 * 0 - off
5424 * 1 - rx pause
5425 * 2 - tx pause
5426 * 3 - full
5427 ************************************************************************/
5428 static int
5429 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5430 {
5431 switch (fc) {
5432 case ixgbe_fc_rx_pause:
5433 case ixgbe_fc_tx_pause:
5434 case ixgbe_fc_full:
5435 adapter->hw.fc.requested_mode = fc;
5436 if (adapter->num_queues > 1)
5437 ixgbe_disable_rx_drop(adapter);
5438 break;
5439 case ixgbe_fc_none:
5440 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5441 if (adapter->num_queues > 1)
5442 ixgbe_enable_rx_drop(adapter);
5443 break;
5444 default:
5445 return (EINVAL);
5446 }
5447
5448 #if 0 /* XXX NetBSD */
5449 /* Don't autoneg if forcing a value */
5450 adapter->hw.fc.disable_fc_autoneg = TRUE;
5451 #endif
5452 ixgbe_fc_enable(&adapter->hw);
5453
5454 return (0);
5455 } /* ixgbe_set_flowcntl */
5456
5457 /************************************************************************
5458 * ixgbe_enable_rx_drop
5459 *
5460 * Enable the hardware to drop packets when the buffer is
5461 * full. This is useful with multiqueue, so that no single
5462 * queue being full stalls the entire RX engine. We only
5463 * enable this when Multiqueue is enabled AND Flow Control
5464 * is disabled.
5465 ************************************************************************/
5466 static void
5467 ixgbe_enable_rx_drop(struct adapter *adapter)
5468 {
5469 struct ixgbe_hw *hw = &adapter->hw;
5470 struct rx_ring *rxr;
5471 u32 srrctl;
5472
5473 for (int i = 0; i < adapter->num_queues; i++) {
5474 rxr = &adapter->rx_rings[i];
5475 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5476 srrctl |= IXGBE_SRRCTL_DROP_EN;
5477 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5478 }
5479
5480 /* enable drop for each vf */
5481 for (int i = 0; i < adapter->num_vfs; i++) {
5482 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5483 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5484 IXGBE_QDE_ENABLE));
5485 }
5486 } /* ixgbe_enable_rx_drop */
5487
5488 /************************************************************************
5489 * ixgbe_disable_rx_drop
5490 ************************************************************************/
5491 static void
5492 ixgbe_disable_rx_drop(struct adapter *adapter)
5493 {
5494 struct ixgbe_hw *hw = &adapter->hw;
5495 struct rx_ring *rxr;
5496 u32 srrctl;
5497
5498 for (int i = 0; i < adapter->num_queues; i++) {
5499 rxr = &adapter->rx_rings[i];
5500 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5501 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5502 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5503 }
5504
5505 /* disable drop for each vf */
5506 for (int i = 0; i < adapter->num_vfs; i++) {
5507 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5508 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5509 }
5510 } /* ixgbe_disable_rx_drop */
5511
5512 /************************************************************************
5513 * ixgbe_sysctl_advertise
5514 *
5515 * SYSCTL wrapper around setting advertised speed
5516 ************************************************************************/
5517 static int
5518 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5519 {
5520 struct sysctlnode node = *rnode;
5521 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5522 int error = 0, advertise;
5523
5524 if (ixgbe_fw_recovery_mode_swflag(adapter))
5525 return (EPERM);
5526
5527 advertise = adapter->advertise;
5528 node.sysctl_data = &advertise;
5529 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5530 if (error != 0 || newp == NULL)
5531 return error;
5532
5533 return ixgbe_set_advertise(adapter, advertise);
5534 } /* ixgbe_sysctl_advertise */
5535
5536 /************************************************************************
5537 * ixgbe_set_advertise - Control advertised link speed
5538 *
5539 * Flags:
5540 * 0x00 - Default (all capable link speed)
5541 * 0x01 - advertise 100 Mb
5542 * 0x02 - advertise 1G
5543 * 0x04 - advertise 10G
5544 * 0x08 - advertise 10 Mb
5545 * 0x10 - advertise 2.5G
5546 * 0x20 - advertise 5G
5547 ************************************************************************/
5548 static int
5549 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5550 {
5551 device_t dev;
5552 struct ixgbe_hw *hw;
5553 ixgbe_link_speed speed = 0;
5554 ixgbe_link_speed link_caps = 0;
5555 s32 err = IXGBE_NOT_IMPLEMENTED;
5556 bool negotiate = FALSE;
5557
5558 /* Checks to validate new value */
5559 if (adapter->advertise == advertise) /* no change */
5560 return (0);
5561
5562 dev = adapter->dev;
5563 hw = &adapter->hw;
5564
5565 /* No speed changes for backplane media */
5566 if (hw->phy.media_type == ixgbe_media_type_backplane)
5567 return (ENODEV);
5568
5569 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5570 (hw->phy.multispeed_fiber))) {
5571 device_printf(dev,
5572 "Advertised speed can only be set on copper or "
5573 "multispeed fiber media types.\n");
5574 return (EINVAL);
5575 }
5576
5577 if (advertise < 0x0 || advertise > 0x3f) {
5578 device_printf(dev,
5579 "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5580 return (EINVAL);
5581 }
5582
5583 if (hw->mac.ops.get_link_capabilities) {
5584 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5585 &negotiate);
5586 if (err != IXGBE_SUCCESS) {
5587 device_printf(dev, "Unable to determine supported advertise speeds\n");
5588 return (ENODEV);
5589 }
5590 }
5591
5592 /* Set new value and report new advertised mode */
5593 if (advertise & 0x1) {
5594 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5595 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5596 return (EINVAL);
5597 }
5598 speed |= IXGBE_LINK_SPEED_100_FULL;
5599 }
5600 if (advertise & 0x2) {
5601 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5602 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5603 return (EINVAL);
5604 }
5605 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5606 }
5607 if (advertise & 0x4) {
5608 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5609 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5610 return (EINVAL);
5611 }
5612 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5613 }
5614 if (advertise & 0x8) {
5615 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5616 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5617 return (EINVAL);
5618 }
5619 speed |= IXGBE_LINK_SPEED_10_FULL;
5620 }
5621 if (advertise & 0x10) {
5622 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5623 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5624 return (EINVAL);
5625 }
5626 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5627 }
5628 if (advertise & 0x20) {
5629 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5630 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5631 return (EINVAL);
5632 }
5633 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5634 }
5635 if (advertise == 0)
5636 speed = link_caps; /* All capable link speed */
5637
5638 hw->mac.autotry_restart = TRUE;
5639 hw->mac.ops.setup_link(hw, speed, TRUE);
5640 adapter->advertise = advertise;
5641
5642 return (0);
5643 } /* ixgbe_set_advertise */
5644
5645 /************************************************************************
5646 * ixgbe_get_advertise - Get current advertised speed settings
5647 *
5648 * Formatted for sysctl usage.
5649 * Flags:
5650 * 0x01 - advertise 100 Mb
5651 * 0x02 - advertise 1G
5652 * 0x04 - advertise 10G
5653 * 0x08 - advertise 10 Mb (yes, Mb)
5654 * 0x10 - advertise 2.5G
5655 * 0x20 - advertise 5G
5656 ************************************************************************/
5657 static int
5658 ixgbe_get_advertise(struct adapter *adapter)
5659 {
5660 struct ixgbe_hw *hw = &adapter->hw;
5661 int speed;
5662 ixgbe_link_speed link_caps = 0;
5663 s32 err;
5664 bool negotiate = FALSE;
5665
5666 /*
5667 * Advertised speed means nothing unless it's copper or
5668 * multi-speed fiber
5669 */
5670 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5671 !(hw->phy.multispeed_fiber))
5672 return (0);
5673
5674 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5675 if (err != IXGBE_SUCCESS)
5676 return (0);
5677
5678 speed =
5679 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5680 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5681 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5682 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5683 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5684 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5685
5686 return speed;
5687 } /* ixgbe_get_advertise */
5688
5689 /************************************************************************
5690 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5691 *
5692 * Control values:
5693 * 0/1 - off / on (use default value of 1000)
5694 *
5695 * Legal timer values are:
5696 * 50,100,250,500,1000,2000,5000,10000
5697 *
5698 * Turning off interrupt moderation will also turn this off.
5699 ************************************************************************/
5700 static int
5701 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5702 {
5703 struct sysctlnode node = *rnode;
5704 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5705 struct ifnet *ifp = adapter->ifp;
5706 int error;
5707 int newval;
5708
5709 if (ixgbe_fw_recovery_mode_swflag(adapter))
5710 return (EPERM);
5711
5712 newval = adapter->dmac;
5713 node.sysctl_data = &newval;
5714 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5715 if ((error) || (newp == NULL))
5716 return (error);
5717
5718 switch (newval) {
5719 case 0:
5720 /* Disabled */
5721 adapter->dmac = 0;
5722 break;
5723 case 1:
5724 /* Enable and use default */
5725 adapter->dmac = 1000;
5726 break;
5727 case 50:
5728 case 100:
5729 case 250:
5730 case 500:
5731 case 1000:
5732 case 2000:
5733 case 5000:
5734 case 10000:
5735 /* Legal values - allow */
5736 adapter->dmac = newval;
5737 break;
5738 default:
5739 /* Do nothing, illegal value */
5740 return (EINVAL);
5741 }
5742
5743 /* Re-initialize hardware if it's already running */
5744 if (ifp->if_flags & IFF_RUNNING)
5745 ifp->if_init(ifp);
5746
5747 return (0);
5748 }
5749
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0      - set device to D0
 *   3      - set device to D3
 *   (none) - get current device power state
 *
 * NOTE(review): the real body below is compiled out with "#ifdef notyet"
 * and still uses FreeBSD-only interfaces (pci_get_powerstate(),
 * DEVICE_SUSPEND()/DEVICE_RESUME(), "req->newp"); it must be ported to
 * the NetBSD equivalents before the #ifdef can be dropped.  As built,
 * the handler is a no-op that reports success.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Current state doubles as the sysctl's exported value. */
	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only the D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
#endif
5796
5797 /************************************************************************
5798 * ixgbe_sysctl_wol_enable
5799 *
5800 * Sysctl to enable/disable the WoL capability,
5801 * if supported by the adapter.
5802 *
5803 * Values:
5804 * 0 - disabled
5805 * 1 - enabled
5806 ************************************************************************/
5807 static int
5808 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5809 {
5810 struct sysctlnode node = *rnode;
5811 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5812 struct ixgbe_hw *hw = &adapter->hw;
5813 bool new_wol_enabled;
5814 int error = 0;
5815
5816 /*
5817 * It's not required to check recovery mode because this function never
5818 * touches hardware.
5819 */
5820 new_wol_enabled = hw->wol_enabled;
5821 node.sysctl_data = &new_wol_enabled;
5822 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5823 if ((error) || (newp == NULL))
5824 return (error);
5825 if (new_wol_enabled == hw->wol_enabled)
5826 return (0);
5827
5828 if (new_wol_enabled && !adapter->wol_support)
5829 return (ENODEV);
5830 else
5831 hw->wol_enabled = new_wol_enabled;
5832
5833 return (0);
5834 } /* ixgbe_sysctl_wol_enable */
5835
5836 /************************************************************************
5837 * ixgbe_sysctl_wufc - Wake Up Filter Control
5838 *
5839 * Sysctl to enable/disable the types of packets that the
5840 * adapter will wake up on upon receipt.
5841 * Flags:
5842 * 0x1 - Link Status Change
5843 * 0x2 - Magic Packet
5844 * 0x4 - Direct Exact
5845 * 0x8 - Directed Multicast
5846 * 0x10 - Broadcast
5847 * 0x20 - ARP/IPv4 Request Packet
5848 * 0x40 - Direct IPv4 Packet
5849 * 0x80 - Direct IPv6 Packet
5850 *
5851 * Settings not listed above will cause the sysctl to return an error.
5852 ************************************************************************/
5853 static int
5854 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5855 {
5856 struct sysctlnode node = *rnode;
5857 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5858 int error = 0;
5859 u32 new_wufc;
5860
5861 /*
5862 * It's not required to check recovery mode because this function never
5863 * touches hardware.
5864 */
5865 new_wufc = adapter->wufc;
5866 node.sysctl_data = &new_wufc;
5867 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5868 if ((error) || (newp == NULL))
5869 return (error);
5870 if (new_wufc == adapter->wufc)
5871 return (0);
5872
5873 if (new_wufc & 0xffffff00)
5874 return (EINVAL);
5875
5876 new_wufc &= 0xff;
5877 new_wufc |= (0xffffff & adapter->wufc);
5878 adapter->wufc = new_wufc;
5879
5880 return (0);
5881 } /* ixgbe_sysctl_wufc */
5882
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 * Dump the RSS redirection table ([E]RETA) via sysctl.
 *
 * NOTE(review): the body is compiled out with "#ifdef notyet" and still
 * uses FreeBSD-only interfaces (struct sbuf, sbuf_new_for_sysctl(),
 * "req"); it must be ported before it can be enabled.  As built, the
 * handler just returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5944
5945 /************************************************************************
5946 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5947 *
5948 * For X552/X557-AT devices using an external PHY
5949 ************************************************************************/
5950 static int
5951 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5952 {
5953 struct sysctlnode node = *rnode;
5954 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5955 struct ixgbe_hw *hw = &adapter->hw;
5956 int val;
5957 u16 reg;
5958 int error;
5959
5960 if (ixgbe_fw_recovery_mode_swflag(adapter))
5961 return (EPERM);
5962
5963 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5964 device_printf(adapter->dev,
5965 "Device has no supported external thermal sensor.\n");
5966 return (ENODEV);
5967 }
5968
5969 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5970 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5971 device_printf(adapter->dev,
5972 "Error reading from PHY's current temperature register\n");
5973 return (EAGAIN);
5974 }
5975
5976 node.sysctl_data = &val;
5977
5978 /* Shift temp for output */
5979 val = reg >> 8;
5980
5981 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5982 if ((error) || (newp == NULL))
5983 return (error);
5984
5985 return (0);
5986 } /* ixgbe_sysctl_phy_temp */
5987
5988 /************************************************************************
5989 * ixgbe_sysctl_phy_overtemp_occurred
5990 *
5991 * Reports (directly from the PHY) whether the current PHY
5992 * temperature is over the overtemp threshold.
5993 ************************************************************************/
5994 static int
5995 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5996 {
5997 struct sysctlnode node = *rnode;
5998 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5999 struct ixgbe_hw *hw = &adapter->hw;
6000 int val, error;
6001 u16 reg;
6002
6003 if (ixgbe_fw_recovery_mode_swflag(adapter))
6004 return (EPERM);
6005
6006 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
6007 device_printf(adapter->dev,
6008 "Device has no supported external thermal sensor.\n");
6009 return (ENODEV);
6010 }
6011
6012 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
6013 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
6014 device_printf(adapter->dev,
6015 "Error reading from PHY's temperature status register\n");
6016 return (EAGAIN);
6017 }
6018
6019 node.sysctl_data = &val;
6020
6021 /* Get occurrence bit */
6022 val = !!(reg & 0x4000);
6023
6024 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6025 if ((error) || (newp == NULL))
6026 return (error);
6027
6028 return (0);
6029 } /* ixgbe_sysctl_phy_overtemp_occurred */
6030
6031 /************************************************************************
6032 * ixgbe_sysctl_eee_state
6033 *
6034 * Sysctl to set EEE power saving feature
6035 * Values:
6036 * 0 - disable EEE
6037 * 1 - enable EEE
6038 * (none) - get current device EEE state
6039 ************************************************************************/
6040 static int
6041 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6042 {
6043 struct sysctlnode node = *rnode;
6044 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6045 struct ifnet *ifp = adapter->ifp;
6046 device_t dev = adapter->dev;
6047 int curr_eee, new_eee, error = 0;
6048 s32 retval;
6049
6050 if (ixgbe_fw_recovery_mode_swflag(adapter))
6051 return (EPERM);
6052
6053 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
6054 node.sysctl_data = &new_eee;
6055 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6056 if ((error) || (newp == NULL))
6057 return (error);
6058
6059 /* Nothing to do */
6060 if (new_eee == curr_eee)
6061 return (0);
6062
6063 /* Not supported */
6064 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
6065 return (EINVAL);
6066
6067 /* Bounds checking */
6068 if ((new_eee < 0) || (new_eee > 1))
6069 return (EINVAL);
6070
6071 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
6072 if (retval) {
6073 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
6074 return (EINVAL);
6075 }
6076
6077 /* Restart auto-neg */
6078 ifp->if_init(ifp);
6079
6080 device_printf(dev, "New EEE state: %d\n", new_eee);
6081
6082 /* Cache new value */
6083 if (new_eee)
6084 adapter->feat_en |= IXGBE_FEATURE_EEE;
6085 else
6086 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
6087
6088 return (error);
6089 } /* ixgbe_sysctl_eee_state */
6090
/*
 * PRINTQS(adapter, regname): dump the per-queue 32-bit register
 * IXGBE_<regname>(i) for every configured queue on a single output
 * line, prefixed with the device name.  Wrapped in do { } while (0)
 * so the macro behaves as a single statement.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
6104
6105 /************************************************************************
6106 * ixgbe_print_debug_info
6107 *
6108 * Called only when em_display_debug_stats is enabled.
6109 * Provides a way to take a look at important statistics
6110 * maintained by the driver and hardware.
6111 ************************************************************************/
6112 static void
6113 ixgbe_print_debug_info(struct adapter *adapter)
6114 {
6115 device_t dev = adapter->dev;
6116 struct ixgbe_hw *hw = &adapter->hw;
6117 int table_size;
6118 int i;
6119
6120 switch (adapter->hw.mac.type) {
6121 case ixgbe_mac_X550:
6122 case ixgbe_mac_X550EM_x:
6123 case ixgbe_mac_X550EM_a:
6124 table_size = 128;
6125 break;
6126 default:
6127 table_size = 32;
6128 break;
6129 }
6130
6131 device_printf(dev, "[E]RETA:\n");
6132 for (i = 0; i < table_size; i++) {
6133 if (i < 32)
6134 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6135 IXGBE_RETA(i)));
6136 else
6137 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6138 IXGBE_ERETA(i - 32)));
6139 }
6140
6141 device_printf(dev, "queue:");
6142 for (i = 0; i < adapter->num_queues; i++) {
6143 printf((i == 0) ? "\t" : " ");
6144 printf("%8d", i);
6145 }
6146 printf("\n");
6147 PRINTQS(adapter, RDBAL);
6148 PRINTQS(adapter, RDBAH);
6149 PRINTQS(adapter, RDLEN);
6150 PRINTQS(adapter, SRRCTL);
6151 PRINTQS(adapter, RDH);
6152 PRINTQS(adapter, RDT);
6153 PRINTQS(adapter, RXDCTL);
6154
6155 device_printf(dev, "RQSMR:");
6156 for (i = 0; i < adapter->num_queues / 4; i++) {
6157 printf((i == 0) ? "\t" : " ");
6158 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6159 }
6160 printf("\n");
6161
6162 device_printf(dev, "disabled_count:");
6163 for (i = 0; i < adapter->num_queues; i++) {
6164 printf((i == 0) ? "\t" : " ");
6165 printf("%8d", adapter->queues[i].disabled_count);
6166 }
6167 printf("\n");
6168
6169 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6170 if (hw->mac.type != ixgbe_mac_82598EB) {
6171 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6172 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6173 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6174 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6175 }
6176 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6177 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6178 } /* ixgbe_print_debug_info */
6179
6180 /************************************************************************
6181 * ixgbe_sysctl_debug
6182 ************************************************************************/
6183 static int
6184 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6185 {
6186 struct sysctlnode node = *rnode;
6187 struct adapter *adapter = (struct adapter *)node.sysctl_data;
6188 int error, result = 0;
6189
6190 if (ixgbe_fw_recovery_mode_swflag(adapter))
6191 return (EPERM);
6192
6193 node.sysctl_data = &result;
6194 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6195
6196 if (error || newp == NULL)
6197 return error;
6198
6199 if (result == 1)
6200 ixgbe_print_debug_info(adapter);
6201
6202 return 0;
6203 } /* ixgbe_sysctl_debug */
6204
6205 /************************************************************************
6206 * ixgbe_init_device_features
6207 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capability set shared by every supported MAC. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Only the 82598AT variant carries the fan-fail sensor. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only on the bypass SKU's function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		/* The 1G_T variants add a temp sensor and EEE support. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only on the bypass SKU's function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require MSI-X vectors. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
6325
6326 /************************************************************************
6327 * ixgbe_probe - Device identification routine
6328 *
6329 * Determines if the driver should be loaded on
6330 * adapter based on its PCI vendor/device ID.
6331 *
6332 * return BUS_PROBE_DEFAULT on success, positive on failure
6333 ************************************************************************/
6334 static int
6335 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6336 {
6337 const struct pci_attach_args *pa = aux;
6338
6339 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6340 }
6341
6342 static const ixgbe_vendor_info_t *
6343 ixgbe_lookup(const struct pci_attach_args *pa)
6344 {
6345 const ixgbe_vendor_info_t *ent;
6346 pcireg_t subid;
6347
6348 INIT_DEBUGOUT("ixgbe_lookup: begin");
6349
6350 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6351 return NULL;
6352
6353 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6354
6355 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6356 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6357 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6358 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6359 (ent->subvendor_id == 0)) &&
6360 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6361 (ent->subdevice_id == 0))) {
6362 return ent;
6363 }
6364 }
6365 return NULL;
6366 }
6367
6368 static int
6369 ixgbe_ifflags_cb(struct ethercom *ec)
6370 {
6371 struct ifnet *ifp = &ec->ec_if;
6372 struct adapter *adapter = ifp->if_softc;
6373 u_short change;
6374 int rv = 0;
6375
6376 IXGBE_CORE_LOCK(adapter);
6377
6378 change = ifp->if_flags ^ adapter->if_flags;
6379 if (change != 0)
6380 adapter->if_flags = ifp->if_flags;
6381
6382 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6383 rv = ENETRESET;
6384 goto out;
6385 } else if ((change & IFF_PROMISC) != 0)
6386 ixgbe_set_rxfilter(adapter);
6387
6388 /* Check for ec_capenable. */
6389 change = ec->ec_capenable ^ adapter->ec_capenable;
6390 adapter->ec_capenable = ec->ec_capenable;
6391 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6392 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6393 rv = ENETRESET;
6394 goto out;
6395 }
6396
6397 /*
6398 * Special handling is not required for ETHERCAP_VLAN_MTU.
6399 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6400 */
6401
6402 /* Set up VLAN support and filter */
6403 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6404 ixgbe_setup_vlan_hw_support(adapter);
6405
6406 out:
6407 IXGBE_CORE_UNLOCK(adapter);
6408
6409 return rv;
6410 }
6411
6412 /************************************************************************
6413 * ixgbe_ioctl - Ioctl entry point
6414 *
6415 * Called when the user wants to configure the interface.
6416 *
6417 * return 0 on success, positive on failure
6418 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must toggle as a set. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* Refuse all configuration while the firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug logging only.  The lone exception is
	 * SIOCZIFDATA, which additionally clears the hardware and
	 * driver event counters before the common handling below.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual request handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/*
		 * Only addresses 0xA0/0xA2 are accepted — presumably the
		 * standard SFP module EEPROM/diagnostic addresses; confirm
		 * against SFF-8472 if extending this.
		 */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Delegate the base work to the common ethernet handler;
		 * ENETRESET signals that hardware state must be refreshed.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* interface down: nothing to refresh */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6553
6554 /************************************************************************
6555 * ixgbe_check_fan_failure
6556 ************************************************************************/
6557 static int
6558 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6559 {
6560 u32 mask;
6561
6562 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6563 IXGBE_ESDP_SDP1;
6564
6565 if (reg & mask) {
6566 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6567 return IXGBE_ERR_FAN_FAILURE;
6568 }
6569
6570 return IXGBE_SUCCESS;
6571 } /* ixgbe_check_fan_failure */
6572
6573 /************************************************************************
6574 * ixgbe_handle_que
6575 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;	/* true while RX/TX work remains */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reap received packets, then completed transmits. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Drain any deferred multiqueue transmit work. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work left over: reschedule ourselves rather than
		 * re-enabling the interrupt. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* MSIX: Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else {
		/* INTx or MSI */
		ixgbe_enable_queue(adapter, 0);
	}

	return;
} /* ixgbe_handle_que */
6615
6616 /************************************************************************
6617 * ixgbe_handle_que_work
6618 ************************************************************************/
6619 static void
6620 ixgbe_handle_que_work(struct work *wk, void *context)
6621 {
6622 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6623
6624 /*
6625 * "enqueued flag" is not required here.
6626 * See ixgbe_msix_que().
6627 */
6628 ixgbe_handle_que(que);
6629 }
6630
/************************************************************************
 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
 *
 *   Allocates and establishes a single interrupt (MSI preferred, with
 *   fallback to INTx when MSI establishment fails), then creates the
 *   softints and workqueues for deferred TX/RX processing.
 *
 *   Returns 0 on success, ENXIO on any allocation/establish failure.
 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char		intrbuf[PCI_INTRSTR_LEN];
	char		wqname[MAXCOMLEN];
	const char	*intrstr = NULL;
	int		defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		/* Establish failed: release and maybe retry with INTx. */
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record the type actually in use (INTx may be a fallback). */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any required deferred context could not be created. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
6739
6740 /************************************************************************
6741 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6742 ************************************************************************/
6743 static int
6744 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6745 {
6746 device_t dev = adapter->dev;
6747 struct ix_queue *que = adapter->queues;
6748 struct tx_ring *txr = adapter->tx_rings;
6749 pci_chipset_tag_t pc;
6750 char intrbuf[PCI_INTRSTR_LEN];
6751 char intr_xname[32];
6752 char wqname[MAXCOMLEN];
6753 const char *intrstr = NULL;
6754 int error, vector = 0;
6755 int cpu_id = 0;
6756 kcpuset_t *affinity;
6757 #ifdef RSS
6758 unsigned int rss_buckets = 0;
6759 kcpuset_t cpu_mask;
6760 #endif
6761
6762 pc = adapter->osdep.pc;
6763 #ifdef RSS
6764 /*
6765 * If we're doing RSS, the number of queues needs to
6766 * match the number of RSS buckets that are configured.
6767 *
6768 * + If there's more queues than RSS buckets, we'll end
6769 * up with queues that get no traffic.
6770 *
6771 * + If there's more RSS buckets than queues, we'll end
6772 * up having multiple RSS buckets map to the same queue,
6773 * so there'll be some contention.
6774 */
6775 rss_buckets = rss_getnumbuckets();
6776 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6777 (adapter->num_queues != rss_buckets)) {
6778 device_printf(dev,
6779 "%s: number of queues (%d) != number of RSS buckets (%d)"
6780 "; performance will be impacted.\n",
6781 __func__, adapter->num_queues, rss_buckets);
6782 }
6783 #endif
6784
6785 adapter->osdep.nintrs = adapter->num_queues + 1;
6786 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6787 adapter->osdep.nintrs) != 0) {
6788 aprint_error_dev(dev,
6789 "failed to allocate MSI-X interrupt\n");
6790 return (ENXIO);
6791 }
6792
6793 kcpuset_create(&affinity, false);
6794 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6795 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6796 device_xname(dev), i);
6797 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6798 sizeof(intrbuf));
6799 #ifdef IXGBE_MPSAFE
6800 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6801 true);
6802 #endif
6803 /* Set the handler function */
6804 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6805 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6806 intr_xname);
6807 if (que->res == NULL) {
6808 aprint_error_dev(dev,
6809 "Failed to register QUE handler\n");
6810 error = ENXIO;
6811 goto err_out;
6812 }
6813 que->msix = vector;
6814 adapter->active_queues |= 1ULL << que->msix;
6815
6816 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6817 #ifdef RSS
6818 /*
6819 * The queue ID is used as the RSS layer bucket ID.
6820 * We look up the queue ID -> RSS CPU ID and select
6821 * that.
6822 */
6823 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6824 CPU_SETOF(cpu_id, &cpu_mask);
6825 #endif
6826 } else {
6827 /*
6828 * Bind the MSI-X vector, and thus the
6829 * rings to the corresponding CPU.
6830 *
6831 * This just happens to match the default RSS
6832 * round-robin bucket -> queue -> CPU allocation.
6833 */
6834 if (adapter->num_queues > 1)
6835 cpu_id = i;
6836 }
6837 /* Round-robin affinity */
6838 kcpuset_zero(affinity);
6839 kcpuset_set(affinity, cpu_id % ncpu);
6840 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6841 NULL);
6842 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6843 intrstr);
6844 if (error == 0) {
6845 #if 1 /* def IXGBE_DEBUG */
6846 #ifdef RSS
6847 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6848 cpu_id % ncpu);
6849 #else
6850 aprint_normal(", bound queue %d to cpu %d", i,
6851 cpu_id % ncpu);
6852 #endif
6853 #endif /* IXGBE_DEBUG */
6854 }
6855 aprint_normal("\n");
6856
6857 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6858 txr->txr_si = softint_establish(
6859 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6860 ixgbe_deferred_mq_start, txr);
6861 if (txr->txr_si == NULL) {
6862 aprint_error_dev(dev,
6863 "couldn't establish software interrupt\n");
6864 error = ENXIO;
6865 goto err_out;
6866 }
6867 }
6868 que->que_si
6869 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6870 ixgbe_handle_que, que);
6871 if (que->que_si == NULL) {
6872 aprint_error_dev(dev,
6873 "couldn't establish software interrupt\n");
6874 error = ENXIO;
6875 goto err_out;
6876 }
6877 }
6878 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6879 error = workqueue_create(&adapter->txr_wq, wqname,
6880 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6881 IXGBE_WORKQUEUE_FLAGS);
6882 if (error) {
6883 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6884 goto err_out;
6885 }
6886 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6887
6888 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6889 error = workqueue_create(&adapter->que_wq, wqname,
6890 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6891 IXGBE_WORKQUEUE_FLAGS);
6892 if (error) {
6893 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6894 goto err_out;
6895 }
6896
6897 /* and Link */
6898 cpu_id++;
6899 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6900 adapter->vector = vector;
6901 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6902 sizeof(intrbuf));
6903 #ifdef IXGBE_MPSAFE
6904 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6905 true);
6906 #endif
6907 /* Set the link handler function */
6908 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6909 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, adapter,
6910 intr_xname);
6911 if (adapter->osdep.ihs[vector] == NULL) {
6912 aprint_error_dev(dev, "Failed to register LINK handler\n");
6913 error = ENXIO;
6914 goto err_out;
6915 }
6916 /* Round-robin affinity */
6917 kcpuset_zero(affinity);
6918 kcpuset_set(affinity, cpu_id % ncpu);
6919 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6920 NULL);
6921
6922 aprint_normal_dev(dev,
6923 "for link, interrupting at %s", intrstr);
6924 if (error == 0)
6925 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6926 else
6927 aprint_normal("\n");
6928
6929 kcpuset_destroy(affinity);
6930 aprint_normal_dev(dev,
6931 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6932
6933 return (0);
6934
6935 err_out:
6936 kcpuset_destroy(affinity);
6937 ixgbe_free_deferred_handlers(adapter);
6938 ixgbe_free_pciintr_resources(adapter);
6939 return (error);
6940 } /* ixgbe_allocate_msix */
6941
6942 /************************************************************************
6943 * ixgbe_configure_interrupts
6944 *
6945 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6946 * This will also depend on user settings.
6947 ************************************************************************/
6948 static int
6949 ixgbe_configure_interrupts(struct adapter *adapter)
6950 {
6951 device_t dev = adapter->dev;
6952 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6953 int want, queues, msgs;
6954
6955 /* Default to 1 queue if MSI-X setup fails */
6956 adapter->num_queues = 1;
6957
6958 /* Override by tuneable */
6959 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6960 goto msi;
6961
6962 /*
6963 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6964 * interrupt slot.
6965 */
6966 if (ncpu == 1)
6967 goto msi;
6968
6969 /* First try MSI-X */
6970 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6971 msgs = MIN(msgs, IXG_MAX_NINTR);
6972 if (msgs < 2)
6973 goto msi;
6974
6975 adapter->msix_mem = (void *)1; /* XXX */
6976
6977 /* Figure out a reasonable auto config value */
6978 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6979
6980 #ifdef RSS
6981 /* If we're doing RSS, clamp at the number of RSS buckets */
6982 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6983 queues = uimin(queues, rss_getnumbuckets());
6984 #endif
6985 if (ixgbe_num_queues > queues) {
6986 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6987 ixgbe_num_queues = queues;
6988 }
6989
6990 if (ixgbe_num_queues != 0)
6991 queues = ixgbe_num_queues;
6992 else
6993 queues = uimin(queues,
6994 uimin(mac->max_tx_queues, mac->max_rx_queues));
6995
6996 /* reflect correct sysctl value */
6997 ixgbe_num_queues = queues;
6998
6999 /*
7000 * Want one vector (RX/TX pair) per queue
7001 * plus an additional for Link.
7002 */
7003 want = queues + 1;
7004 if (msgs >= want)
7005 msgs = want;
7006 else {
7007 aprint_error_dev(dev, "MSI-X Configuration Problem, "
7008 "%d vectors but %d queues wanted!\n",
7009 msgs, want);
7010 goto msi;
7011 }
7012 adapter->num_queues = queues;
7013 adapter->feat_en |= IXGBE_FEATURE_MSIX;
7014 return (0);
7015
7016 /*
7017 * MSI-X allocation failed or provided us with
7018 * less vectors than needed. Free MSI-X resources
7019 * and we'll try enabling MSI.
7020 */
7021 msi:
7022 /* Without MSI-X, some features are no longer supported */
7023 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
7024 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
7025 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7026 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
7027
7028 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
7029 adapter->msix_mem = NULL; /* XXX */
7030 if (msgs > 1)
7031 msgs = 1;
7032 if (msgs != 0) {
7033 msgs = 1;
7034 adapter->feat_en |= IXGBE_FEATURE_MSI;
7035 return (0);
7036 }
7037
7038 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7039 aprint_error_dev(dev,
7040 "Device does not support legacy interrupts.\n");
7041 return 1;
7042 }
7043
7044 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7045
7046 return (0);
7047 } /* ixgbe_configure_interrupts */
7048
7049
7050 /************************************************************************
7051 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7052 *
7053 * Done outside of interrupt context since the driver might sleep
7054 ************************************************************************/
7055 static void
7056 ixgbe_handle_link(void *context)
7057 {
7058 struct adapter *adapter = context;
7059 struct ixgbe_hw *hw = &adapter->hw;
7060
7061 KASSERT(mutex_owned(&adapter->core_mtx));
7062
7063 ++adapter->link_workev.ev_count;
7064 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
7065 ixgbe_update_link_status(adapter);
7066
7067 /* Re-enable link interrupts */
7068 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7069 } /* ixgbe_handle_link */
7070
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   (Currently compiled out.)  Trigger a software interrupt for the
 *   queues named in the 'queues' bitmask by writing the EICS
 *   register(s): 82598 has a single EICS, newer MACs split the 64-bit
 *   mask across the two extended registers EICS_EX(0)/EICS_EX(1).
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: only the low RTX queue bits are valid */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Newer MACs: low and high 32 bits written separately */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
7100