ixgbe.c revision 1.167 1 /* $NetBSD: ixgbe.c,v 1.167 2018/09/27 05:40:27 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM_x family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* X550EM_a family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry: all-zero sentinel terminates the probe scan */
	{0, 0, 0, 0, 0}
};
144
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last ("String Index") field of the entries in
 *   ixgbe_vendor_info_array; printed during attach.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
151
/************************************************************************
 * Function prototypes
 ************************************************************************/

/* Autoconf(9) device entry points */
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif

/* pmf(9) power-management hooks */
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);

/* ifnet(9) entry points and init/stop helpers */
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static void	ixgbe_ifstop(struct ifnet *, int);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_stop(void *);

/* Feature detection, media and PCI resource management */
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_softint(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);

/* Watchdog timer and link/feature configuration */
static void	ixgbe_local_timer(void *);
static void	ixgbe_local_timer1(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

/* TX/RX ring and RSS initialization */
static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

/* Interrupt control, statistics and filter programming */
static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_promisc(struct adapter *);
static void	ixgbe_set_multi(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN hardware offload */
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
#if 0
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* sysctl(9) tree construction and event counters */
static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_link(void *);

/* Software interrupts for deferred work */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

/* PCI ID table lookup used by probe/attach */
static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
272
/************************************************************************
 * NetBSD Device Interface Entry Points
 *
 *   Registers the "ixg" driver with autoconf(9).  DVF_DETACH_SHUTDOWN
 *   requests that the device be detached at system shutdown.
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * FreeBSD newbus/module registration, kept (disabled) to ease merges
 * from the upstream if_ix.c driver.
 */
#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
290
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * NOTE: SYSCTL_INT is defined away to nothing below, so every
 * SYSCTL_INT() invocation in this file is a no-op.  They are retained
 * verbatim to minimize diffs against the FreeBSD if_ix.c this driver
 * is merged from; the NetBSD sysctl tree is presumably built in
 * ixgbe_add_device_sysctls() instead — see the prototype above.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Interrupt rate ceiling used when programming EITR */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
/*
 * NOTE(review): declared int but initialized with a bool constant;
 * harmless, but consider making the type consistent.  TUNABLE_INT is
 * also stubbed out below (FreeBSD compat), so the TUNABLE_INT() call
 * is a no-op on NetBSD.
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * Flags passed to callout(9)/softint(9)/workqueue(9) creation; with
 * NET_MPSAFE the handlers run without the kernel lock.
 * (The SOFTINFT spelling is a pre-existing typo in the identifier;
 * retained because other files may reference it.)
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
413
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the 40-byte RSS
 *   hash key (RSSRK) and the hash-field selection (MRQC) so received
 *   packets are distributed across the adapter's RX queues.
 *
 *   Register writes happen in a fixed order required by the hardware:
 *   redirection table first, then key, then MRQC last (MRQC.RSSEN
 *   enables the whole mechanism).
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 spreads each queue across its entries via the 0x11
	 * multiplier; X550-class MACs have a 512-entry table (the
	 * upper 384 entries live in ERETA).
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0 .. num_queues-1 (round-robin over queues) */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then flush one 32-bit
		 * RETA/ERETA register per four table entries.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-type config into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in the VF/IOV queue-assignment mode bits */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
530
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs, per RX queue: descriptor ring base/length (RDBAL/RDBAH/
 *   RDLEN), buffer sizing and drop policy (SRRCTL), statistics mapping
 *   (RQSMR) and head/tail pointers (RDH/RDT).  Also configures global
 *   filtering (FCTRL), jumbo/CRC handling (HLREG0), split-receive type
 *   (PSRTYPE), RSS, and RX checksum offload (RXCSUM).
 ************************************************************************/
/* Round a buffer size up to the next SRRCTL.BSIZEPKT granule (1 KB) */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in 1 KB SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware ring index; i is the driver index */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	/* 82599 and later: select header types for packet-split receive */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
660
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Programs, per TX queue: descriptor ring base/length (TDBAL/TDBAH/
 *   TDLEN), statistics mapping (TQSMR/TQSM), head/tail pointers
 *   (TDH/TDT) and write-ordering control (DCA_TXCTRL).  On 82599 and
 *   later, also enables the DMA TX engine and programs MTQC with the
 *   arbiter briefly disabled, as the hardware requires.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int		i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware ring index; i is the driver index */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
752
753 /************************************************************************
754 * ixgbe_attach - Device initialization routine
755 *
756 * Called when the driver is being loaded.
757 * Identifies the type of hardware, allocates all resources
758 * and initializes the hardware.
759 *
760 * return 0 on success, positive on failure
761 ************************************************************************/
762 static void
763 ixgbe_attach(device_t parent, device_t dev, void *aux)
764 {
765 struct adapter *adapter;
766 struct ixgbe_hw *hw;
767 int error = -1;
768 u32 ctrl_ext;
769 u16 high, low, nvmreg;
770 pcireg_t id, subid;
771 const ixgbe_vendor_info_t *ent;
772 struct pci_attach_args *pa = aux;
773 const char *str;
774 char buf[256];
775
776 INIT_DEBUGOUT("ixgbe_attach: begin");
777
778 /* Allocate, clear, and link in our adapter structure */
779 adapter = device_private(dev);
780 adapter->hw.back = adapter;
781 adapter->dev = dev;
782 hw = &adapter->hw;
783 adapter->osdep.pc = pa->pa_pc;
784 adapter->osdep.tag = pa->pa_tag;
785 if (pci_dma64_available(pa))
786 adapter->osdep.dmat = pa->pa_dmat64;
787 else
788 adapter->osdep.dmat = pa->pa_dmat;
789 adapter->osdep.attached = false;
790
791 ent = ixgbe_lookup(pa);
792
793 KASSERT(ent != NULL);
794
795 aprint_normal(": %s, Version - %s\n",
796 ixgbe_strings[ent->index], ixgbe_driver_version);
797
798 /* Core Lock Init*/
799 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
800
801 /* Set up the timer callout */
802 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
803
804 /* Determine hardware revision */
805 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
806 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
807
808 hw->vendor_id = PCI_VENDOR(id);
809 hw->device_id = PCI_PRODUCT(id);
810 hw->revision_id =
811 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
812 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
813 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
814
815 /*
816 * Make sure BUSMASTER is set
817 */
818 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
819
820 /* Do base PCI setup - map BAR0 */
821 if (ixgbe_allocate_pci_resources(adapter, pa)) {
822 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
823 error = ENXIO;
824 goto err_out;
825 }
826
827 /* let hardware know driver is loaded */
828 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
829 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
830 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
831
832 /*
833 * Initialize the shared code
834 */
835 if (ixgbe_init_shared_code(hw) != 0) {
836 aprint_error_dev(dev, "Unable to initialize the shared code\n");
837 error = ENXIO;
838 goto err_out;
839 }
840
841 switch (hw->mac.type) {
842 case ixgbe_mac_82598EB:
843 str = "82598EB";
844 break;
845 case ixgbe_mac_82599EB:
846 str = "82599EB";
847 break;
848 case ixgbe_mac_X540:
849 str = "X540";
850 break;
851 case ixgbe_mac_X550:
852 str = "X550";
853 break;
854 case ixgbe_mac_X550EM_x:
855 str = "X550EM";
856 break;
857 case ixgbe_mac_X550EM_a:
858 str = "X550EM A";
859 break;
860 default:
861 str = "Unknown";
862 break;
863 }
864 aprint_normal_dev(dev, "device %s\n", str);
865
866 if (hw->mbx.ops.init_params)
867 hw->mbx.ops.init_params(hw);
868
869 hw->allow_unsupported_sfp = allow_unsupported_sfp;
870
871 /* Pick up the 82599 settings */
872 if (hw->mac.type != ixgbe_mac_82598EB) {
873 hw->phy.smart_speed = ixgbe_smart_speed;
874 adapter->num_segs = IXGBE_82599_SCATTER;
875 } else
876 adapter->num_segs = IXGBE_82598_SCATTER;
877
878 hw->mac.ops.set_lan_id(hw);
879 ixgbe_init_device_features(adapter);
880
881 if (ixgbe_configure_interrupts(adapter)) {
882 error = ENXIO;
883 goto err_out;
884 }
885
886 /* Allocate multicast array memory. */
887 adapter->mta = malloc(sizeof(*adapter->mta) *
888 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
889 if (adapter->mta == NULL) {
890 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
891 error = ENOMEM;
892 goto err_out;
893 }
894
895 /* Enable WoL (if supported) */
896 ixgbe_check_wol_support(adapter);
897
898 /* Verify adapter fan is still functional (if applicable) */
899 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
900 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
901 ixgbe_check_fan_failure(adapter, esdp, FALSE);
902 }
903
904 /* Ensure SW/FW semaphore is free */
905 ixgbe_init_swfw_semaphore(hw);
906
907 /* Enable EEE power saving */
908 if (adapter->feat_en & IXGBE_FEATURE_EEE)
909 hw->mac.ops.setup_eee(hw, TRUE);
910
911 /* Set an initial default flow control value */
912 hw->fc.requested_mode = ixgbe_flow_control;
913
914 /* Sysctls for limiting the amount of work done in the taskqueues */
915 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
916 "max number of rx packets to process",
917 &adapter->rx_process_limit, ixgbe_rx_process_limit);
918
919 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
920 "max number of tx packets to process",
921 &adapter->tx_process_limit, ixgbe_tx_process_limit);
922
923 /* Do descriptor calc and sanity checks */
924 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
925 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
926 aprint_error_dev(dev, "TXD config issue, using default!\n");
927 adapter->num_tx_desc = DEFAULT_TXD;
928 } else
929 adapter->num_tx_desc = ixgbe_txd;
930
931 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
932 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
933 aprint_error_dev(dev, "RXD config issue, using default!\n");
934 adapter->num_rx_desc = DEFAULT_RXD;
935 } else
936 adapter->num_rx_desc = ixgbe_rxd;
937
938 /* Allocate our TX/RX Queues */
939 if (ixgbe_allocate_queues(adapter)) {
940 error = ENOMEM;
941 goto err_out;
942 }
943
944 hw->phy.reset_if_overtemp = TRUE;
945 error = ixgbe_reset_hw(hw);
946 hw->phy.reset_if_overtemp = FALSE;
947 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
948 /*
949 * No optics in this port, set up
950 * so the timer routine will probe
951 * for later insertion.
952 */
953 adapter->sfp_probe = TRUE;
954 error = IXGBE_SUCCESS;
955 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
956 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
957 error = EIO;
958 goto err_late;
959 } else if (error) {
960 aprint_error_dev(dev, "Hardware initialization failed\n");
961 error = EIO;
962 goto err_late;
963 }
964
965 /* Make sure we have a good EEPROM before we read from it */
966 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
967 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
968 error = EIO;
969 goto err_late;
970 }
971
972 aprint_normal("%s:", device_xname(dev));
973 /* NVM Image Version */
974 switch (hw->mac.type) {
975 case ixgbe_mac_X540:
976 case ixgbe_mac_X550EM_a:
977 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
978 if (nvmreg == 0xffff)
979 break;
980 high = (nvmreg >> 12) & 0x0f;
981 low = (nvmreg >> 4) & 0xff;
982 id = nvmreg & 0x0f;
983 aprint_normal(" NVM Image Version %u.", high);
984 if (hw->mac.type == ixgbe_mac_X540)
985 str = "%x";
986 else
987 str = "%02x";
988 aprint_normal(str, low);
989 aprint_normal(" ID 0x%x,", id);
990 break;
991 case ixgbe_mac_X550EM_x:
992 case ixgbe_mac_X550:
993 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
994 if (nvmreg == 0xffff)
995 break;
996 high = (nvmreg >> 12) & 0x0f;
997 low = nvmreg & 0xff;
998 aprint_normal(" NVM Image Version %u.%02x,", high, low);
999 break;
1000 default:
1001 break;
1002 }
1003
1004 /* PHY firmware revision */
1005 switch (hw->mac.type) {
1006 case ixgbe_mac_X540:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = (nvmreg >> 4) & 0xff;
1013 id = nvmreg & 0x000f;
1014 aprint_normal(" PHY FW Revision %u.", high);
1015 if (hw->mac.type == ixgbe_mac_X540)
1016 str = "%x";
1017 else
1018 str = "%02x";
1019 aprint_normal(str, low);
1020 aprint_normal(" ID 0x%x,", id);
1021 break;
1022 default:
1023 break;
1024 }
1025
1026 /* NVM Map version & OEM NVM Image version */
1027 switch (hw->mac.type) {
1028 case ixgbe_mac_X550:
1029 case ixgbe_mac_X550EM_x:
1030 case ixgbe_mac_X550EM_a:
1031 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1032 if (nvmreg != 0xffff) {
1033 high = (nvmreg >> 12) & 0x0f;
1034 low = nvmreg & 0x00ff;
1035 aprint_normal(" NVM Map version %u.%02x,", high, low);
1036 }
1037 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1038 if (nvmreg != 0xffff) {
1039 high = (nvmreg >> 12) & 0x0f;
1040 low = nvmreg & 0x00ff;
1041 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1042 low);
1043 }
1044 break;
1045 default:
1046 break;
1047 }
1048
1049 /* Print the ETrackID */
1050 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1051 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1052 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1053
1054 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1055 error = ixgbe_allocate_msix(adapter, pa);
1056 if (error) {
1057 /* Free allocated queue structures first */
1058 ixgbe_free_transmit_structures(adapter);
1059 ixgbe_free_receive_structures(adapter);
1060 free(adapter->queues, M_DEVBUF);
1061
1062 /* Fallback to legacy interrupt */
1063 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1064 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1065 adapter->feat_en |= IXGBE_FEATURE_MSI;
1066 adapter->num_queues = 1;
1067
1068 /* Allocate our TX/RX Queues again */
1069 if (ixgbe_allocate_queues(adapter)) {
1070 error = ENOMEM;
1071 goto err_out;
1072 }
1073 }
1074 }
1075 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1076 error = ixgbe_allocate_legacy(adapter, pa);
1077 if (error)
1078 goto err_late;
1079
1080 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1081 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1082 ixgbe_handle_link, adapter);
1083 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1084 ixgbe_handle_mod, adapter);
1085 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1086 ixgbe_handle_msf, adapter);
1087 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1088 ixgbe_handle_phy, adapter);
1089 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1090 adapter->fdir_si =
1091 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1092 ixgbe_reinit_fdir, adapter);
1093 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1094 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1095 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1096 && (adapter->fdir_si == NULL))) {
1097 aprint_error_dev(dev,
1098 "could not establish software interrupts ()\n");
1099 goto err_out;
1100 }
1101
1102 error = ixgbe_start_hw(hw);
1103 switch (error) {
1104 case IXGBE_ERR_EEPROM_VERSION:
1105 aprint_error_dev(dev, "This device is a pre-production adapter/"
1106 "LOM. Please be aware there may be issues associated "
1107 "with your hardware.\nIf you are experiencing problems "
1108 "please contact your Intel or hardware representative "
1109 "who provided you with this hardware.\n");
1110 break;
1111 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1112 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1113 error = EIO;
1114 goto err_late;
1115 case IXGBE_ERR_SFP_NOT_PRESENT:
1116 aprint_error_dev(dev, "No SFP+ Module found\n");
1117 /* falls thru */
1118 default:
1119 break;
1120 }
1121
1122 /* Setup OS specific network interface */
1123 if (ixgbe_setup_interface(dev, adapter) != 0)
1124 goto err_late;
1125
1126 /*
1127 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1128 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1129 */
1130 if (hw->phy.media_type == ixgbe_media_type_copper) {
1131 uint16_t id1, id2;
1132 int oui, model, rev;
1133 const char *descr;
1134
1135 id1 = hw->phy.id >> 16;
1136 id2 = hw->phy.id & 0xffff;
1137 oui = MII_OUI(id1, id2);
1138 model = MII_MODEL(id2);
1139 rev = MII_REV(id2);
1140 if ((descr = mii_get_descr(oui, model)) != NULL)
1141 aprint_normal_dev(dev,
1142 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1143 descr, oui, model, rev);
1144 else
1145 aprint_normal_dev(dev,
1146 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1147 oui, model, rev);
1148 }
1149
1150 /* Enable the optics for 82599 SFP+ fiber */
1151 ixgbe_enable_tx_laser(hw);
1152
1153 /* Enable power to the phy. */
1154 ixgbe_set_phy_power(hw, TRUE);
1155
1156 /* Initialize statistics */
1157 ixgbe_update_stats_counters(adapter);
1158
1159 /* Check PCIE slot type/speed/width */
1160 ixgbe_get_slot_info(adapter);
1161
1162 /*
1163 * Do time init and sysctl init here, but
1164 * only on the first port of a bypass adapter.
1165 */
1166 ixgbe_bypass_init(adapter);
1167
1168 /* Set an initial dmac value */
1169 adapter->dmac = 0;
1170 /* Set initial advertised speeds (if applicable) */
1171 adapter->advertise = ixgbe_get_advertise(adapter);
1172
1173 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1174 ixgbe_define_iov_schemas(dev, &error);
1175
1176 /* Add sysctls */
1177 ixgbe_add_device_sysctls(adapter);
1178 ixgbe_add_hw_stats(adapter);
1179
1180 /* For Netmap */
1181 adapter->init_locked = ixgbe_init_locked;
1182 adapter->stop_locked = ixgbe_stop;
1183
1184 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1185 ixgbe_netmap_attach(adapter);
1186
1187 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1188 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1189 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1190 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1191
1192 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1193 pmf_class_network_register(dev, adapter->ifp);
1194 else
1195 aprint_error_dev(dev, "couldn't establish power handler\n");
1196
1197 INIT_DEBUGOUT("ixgbe_attach: end");
1198 adapter->osdep.attached = true;
1199
1200 return;
1201
1202 err_late:
1203 ixgbe_free_transmit_structures(adapter);
1204 ixgbe_free_receive_structures(adapter);
1205 free(adapter->queues, M_DEVBUF);
1206 err_out:
1207 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1208 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1209 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1210 ixgbe_free_softint(adapter);
1211 ixgbe_free_pci_resources(adapter);
1212 if (adapter->mta != NULL)
1213 free(adapter->mta, M_DEVBUF);
1214 IXGBE_CORE_LOCK_DESTROY(adapter);
1215
1216 return;
1217 } /* ixgbe_attach */
1218
1219 /************************************************************************
1220 * ixgbe_check_wol_support
1221 *
1222 * Checks whether the adapter's ports are capable of
1223 * Wake On LAN by reading the adapter's NVM.
1224 *
1225 * Sets each port's hw->wol_enabled value depending
1226 * on the value read here.
1227 ************************************************************************/
1228 static void
1229 ixgbe_check_wol_support(struct adapter *adapter)
1230 {
1231 struct ixgbe_hw *hw = &adapter->hw;
1232 u16 dev_caps = 0;
1233
1234 /* Find out WoL support for port */
1235 adapter->wol_support = hw->wol_enabled = 0;
1236 ixgbe_get_device_caps(hw, &dev_caps);
1237 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1238 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1239 hw->bus.func == 0))
1240 adapter->wol_support = hw->wol_enabled = 1;
1241
1242 /* Save initial wake up filter configuration */
1243 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1244
1245 return;
1246 } /* ixgbe_check_wol_support */
1247
1248 /************************************************************************
1249 * ixgbe_setup_interface
1250 *
1251 * Setup networking device structure and register an interface.
1252 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Use the ifnet embedded in our ethercom; fill in the methods */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (FreeBSD-only; never compiled on NetBSD) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Multiqueue transmit (if_transmit) is used unless legacy TX
	 * was requested; if_start is always set as the fallback path.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach order matters: if_initialize must precede
	 * if_percpuq_create and ether_ifattach, and if_register
	 * (at the end of this function) completes the attach.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (checksum offload and TSO for v4/v6) */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Interface-level capabilities start disabled ... */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1355
1356 /************************************************************************
1357 * ixgbe_add_media_types
1358 ************************************************************************/
1359 static void
1360 ixgbe_add_media_types(struct adapter *adapter)
1361 {
1362 struct ixgbe_hw *hw = &adapter->hw;
1363 device_t dev = adapter->dev;
1364 u64 layer;
1365
1366 layer = adapter->phy_layer;
1367
1368 #define ADD(mm, dd) \
1369 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1370
1371 ADD(IFM_NONE, 0);
1372
1373 /* Media types with matching NetBSD media defines */
1374 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1375 ADD(IFM_10G_T | IFM_FDX, 0);
1376 }
1377 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1378 ADD(IFM_1000_T | IFM_FDX, 0);
1379 }
1380 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1381 ADD(IFM_100_TX | IFM_FDX, 0);
1382 }
1383 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1384 ADD(IFM_10_T | IFM_FDX, 0);
1385 }
1386
1387 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1388 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1389 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1390 }
1391
1392 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1393 ADD(IFM_10G_LR | IFM_FDX, 0);
1394 if (hw->phy.multispeed_fiber) {
1395 ADD(IFM_1000_LX | IFM_FDX, 0);
1396 }
1397 }
1398 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1399 ADD(IFM_10G_SR | IFM_FDX, 0);
1400 if (hw->phy.multispeed_fiber) {
1401 ADD(IFM_1000_SX | IFM_FDX, 0);
1402 }
1403 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1404 ADD(IFM_1000_SX | IFM_FDX, 0);
1405 }
1406 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1407 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1408 }
1409
1410 #ifdef IFM_ETH_XTYPE
1411 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1412 ADD(IFM_10G_KR | IFM_FDX, 0);
1413 }
1414 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1415 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1416 }
1417 #else
1418 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1419 device_printf(dev, "Media supported: 10GbaseKR\n");
1420 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1421 ADD(IFM_10G_SR | IFM_FDX, 0);
1422 }
1423 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1424 device_printf(dev, "Media supported: 10GbaseKX4\n");
1425 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1426 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1427 }
1428 #endif
1429 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1430 ADD(IFM_1000_KX | IFM_FDX, 0);
1431 }
1432 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1433 ADD(IFM_2500_KX | IFM_FDX, 0);
1434 }
1435 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1436 ADD(IFM_2500_T | IFM_FDX, 0);
1437 }
1438 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1439 ADD(IFM_5000_T | IFM_FDX, 0);
1440 }
1441 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1442 device_printf(dev, "Media supported: 1000baseBX\n");
1443 /* XXX no ifmedia_set? */
1444
1445 ADD(IFM_AUTO, 0);
1446
1447 #undef ADD
1448 } /* ixgbe_add_media_types */
1449
1450 /************************************************************************
1451 * ixgbe_is_sfp
1452 ************************************************************************/
1453 static inline bool
1454 ixgbe_is_sfp(struct ixgbe_hw *hw)
1455 {
1456 switch (hw->mac.type) {
1457 case ixgbe_mac_82598EB:
1458 if (hw->phy.type == ixgbe_phy_nl)
1459 return (TRUE);
1460 return (FALSE);
1461 case ixgbe_mac_82599EB:
1462 switch (hw->mac.ops.get_media_type(hw)) {
1463 case ixgbe_media_type_fiber:
1464 case ixgbe_media_type_fiber_qsfp:
1465 return (TRUE);
1466 default:
1467 return (FALSE);
1468 }
1469 case ixgbe_mac_X550EM_x:
1470 case ixgbe_mac_X550EM_a:
1471 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1472 return (TRUE);
1473 return (FALSE);
1474 default:
1475 return (FALSE);
1476 }
1477 } /* ixgbe_is_sfp */
1478
1479 /************************************************************************
1480 * ixgbe_config_link
1481 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP path: defer module (and, for multispeed fiber,
		 * MSF) handling to the softints established at attach
		 * time.  kpreempt_disable() pins us to the CPU for the
		 * softint_schedule() call.
		 */
		if (hw->phy.multispeed_fiber) {
			ixgbe_enable_tx_laser(hw);
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		}
		kpreempt_disable();
		softint_schedule(adapter->mod_si);
		kpreempt_enable();
	} else {
		/* Non-SFP path: query link state and set up the link */
		struct ifmedia *ifm = &adapter->media;

		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			return;

		/*
		 * Check if it's the first call. If it's the first call,
		 * get value for auto negotiation.
		 */
		autoneg = hw->phy.autoneg_advertised;
		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;
		/* NB: errors from setup_link are intentionally ignored */
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}

} /* ixgbe_config_link */
1527
1528 /************************************************************************
1529 * ixgbe_update_stats_counters - Update board statistics counters.
1530 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	/*
	 * NOTE(review): missed_rx is never incremented in this
	 * function (stays 0), so the GPRC workaround subtraction
	 * below is currently a no-op -- confirm against upstream,
	 * which accumulates per-queue MPC values into it.
	 */
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Error counters (most are read-to-clear hardware registers) */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		/* Fold counters for unused hw queues into active ones */
		int j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			/* QPRDC (per-queue rx drops) is 82599 and newer */
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC (receive no buffer) exists only on 82598 */
		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		/* Per-priority flow control XON/XOFF counters */
		stats->pxontxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			/* 82598 uses differently-named RX XON/XOFF regs */
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level flow control XON/XOFF frames transmitted */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/*
		 * 64-bit byte counters are split low/high; reading the
		 * low half latches the high half.  GOTC is adjusted to
		 * exclude the pause frames counted in "total".
		 */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Received-packet size histogram */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX counters, adjusted to exclude the pause frames in "total" */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1692
1693 /************************************************************************
1694 * ixgbe_add_hw_stats
1695 *
1696 * Add sysctl variables, one per statistic, to the system.
1697 ************************************************************************/
1698 static void
1699 ixgbe_add_hw_stats(struct adapter *adapter)
1700 {
1701 device_t dev = adapter->dev;
1702 const struct sysctlnode *rnode, *cnode;
1703 struct sysctllog **log = &adapter->sysctllog;
1704 struct tx_ring *txr = adapter->tx_rings;
1705 struct rx_ring *rxr = adapter->rx_rings;
1706 struct ixgbe_hw *hw = &adapter->hw;
1707 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1708 const char *xname = device_xname(dev);
1709 int i;
1710
1711 /* Driver Statistics */
1712 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1713 NULL, xname, "Driver tx dma soft fail EFBIG");
1714 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1715 NULL, xname, "m_defrag() failed");
1716 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1717 NULL, xname, "Driver tx dma hard fail EFBIG");
1718 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1719 NULL, xname, "Driver tx dma hard fail EINVAL");
1720 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1721 NULL, xname, "Driver tx dma hard fail other");
1722 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1723 NULL, xname, "Driver tx dma soft fail EAGAIN");
1724 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1725 NULL, xname, "Driver tx dma soft fail ENOMEM");
1726 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1727 NULL, xname, "Watchdog timeouts");
1728 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1729 NULL, xname, "TSO errors");
1730 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1731 NULL, xname, "Link MSI-X IRQ Handled");
1732 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1733 NULL, xname, "Link softint");
1734 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1735 NULL, xname, "module softint");
1736 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1737 NULL, xname, "multimode softint");
1738 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1739 NULL, xname, "external PHY softint");
1740
1741 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1742 #ifdef LRO
1743 struct lro_ctrl *lro = &rxr->lro;
1744 #endif /* LRO */
1745
1746 snprintf(adapter->queues[i].evnamebuf,
1747 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1748 xname, i);
1749 snprintf(adapter->queues[i].namebuf,
1750 sizeof(adapter->queues[i].namebuf), "q%d", i);
1751
1752 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1753 aprint_error_dev(dev, "could not create sysctl root\n");
1754 break;
1755 }
1756
1757 if (sysctl_createv(log, 0, &rnode, &rnode,
1758 0, CTLTYPE_NODE,
1759 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1760 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1761 break;
1762
1763 if (sysctl_createv(log, 0, &rnode, &cnode,
1764 CTLFLAG_READWRITE, CTLTYPE_INT,
1765 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1766 ixgbe_sysctl_interrupt_rate_handler, 0,
1767 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1768 break;
1769
1770 if (sysctl_createv(log, 0, &rnode, &cnode,
1771 CTLFLAG_READONLY, CTLTYPE_INT,
1772 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1773 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1774 0, CTL_CREATE, CTL_EOL) != 0)
1775 break;
1776
1777 if (sysctl_createv(log, 0, &rnode, &cnode,
1778 CTLFLAG_READONLY, CTLTYPE_INT,
1779 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1780 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1781 0, CTL_CREATE, CTL_EOL) != 0)
1782 break;
1783
1784 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1785 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1786 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1787 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1788 "Handled queue in softint");
1789 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1790 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1791 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1792 NULL, adapter->queues[i].evnamebuf, "TSO");
1793 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1794 NULL, adapter->queues[i].evnamebuf,
1795 "Queue No Descriptor Available");
1796 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1797 NULL, adapter->queues[i].evnamebuf,
1798 "Queue Packets Transmitted");
1799 #ifndef IXGBE_LEGACY_TX
1800 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1801 NULL, adapter->queues[i].evnamebuf,
1802 "Packets dropped in pcq");
1803 #endif
1804
1805 if (sysctl_createv(log, 0, &rnode, &cnode,
1806 CTLFLAG_READONLY,
1807 CTLTYPE_INT,
1808 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1809 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1810 CTL_CREATE, CTL_EOL) != 0)
1811 break;
1812
1813 if (sysctl_createv(log, 0, &rnode, &cnode,
1814 CTLFLAG_READONLY,
1815 CTLTYPE_INT,
1816 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1817 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1818 CTL_CREATE, CTL_EOL) != 0)
1819 break;
1820
1821 if (sysctl_createv(log, 0, &rnode, &cnode,
1822 CTLFLAG_READONLY,
1823 CTLTYPE_INT,
1824 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1825 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1826 CTL_CREATE, CTL_EOL) != 0)
1827 break;
1828
1829 if (i < __arraycount(stats->mpc)) {
1830 evcnt_attach_dynamic(&stats->mpc[i],
1831 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1832 "RX Missed Packet Count");
1833 if (hw->mac.type == ixgbe_mac_82598EB)
1834 evcnt_attach_dynamic(&stats->rnbc[i],
1835 EVCNT_TYPE_MISC, NULL,
1836 adapter->queues[i].evnamebuf,
1837 "Receive No Buffers");
1838 }
1839 if (i < __arraycount(stats->pxontxc)) {
1840 evcnt_attach_dynamic(&stats->pxontxc[i],
1841 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1842 "pxontxc");
1843 evcnt_attach_dynamic(&stats->pxonrxc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1845 "pxonrxc");
1846 evcnt_attach_dynamic(&stats->pxofftxc[i],
1847 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1848 "pxofftxc");
1849 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1850 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1851 "pxoffrxc");
1852 if (hw->mac.type >= ixgbe_mac_82599EB)
1853 evcnt_attach_dynamic(&stats->pxon2offc[i],
1854 EVCNT_TYPE_MISC, NULL,
1855 adapter->queues[i].evnamebuf,
1856 "pxon2offc");
1857 }
1858 if (i < __arraycount(stats->qprc)) {
1859 evcnt_attach_dynamic(&stats->qprc[i],
1860 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1861 "qprc");
1862 evcnt_attach_dynamic(&stats->qptc[i],
1863 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1864 "qptc");
1865 evcnt_attach_dynamic(&stats->qbrc[i],
1866 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1867 "qbrc");
1868 evcnt_attach_dynamic(&stats->qbtc[i],
1869 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1870 "qbtc");
1871 if (hw->mac.type >= ixgbe_mac_82599EB)
1872 evcnt_attach_dynamic(&stats->qprdc[i],
1873 EVCNT_TYPE_MISC, NULL,
1874 adapter->queues[i].evnamebuf, "qprdc");
1875 }
1876
1877 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1878 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1879 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1880 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1881 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1882 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1883 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1884 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1885 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1886 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1887 #ifdef LRO
1888 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1889 CTLFLAG_RD, &lro->lro_queued, 0,
1890 "LRO Queued");
1891 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1892 CTLFLAG_RD, &lro->lro_flushed, 0,
1893 "LRO Flushed");
1894 #endif /* LRO */
1895 }
1896
1897 /* MAC stats get their own sub node */
1898
1899 snprintf(stats->namebuf,
1900 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1901
1902 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1903 stats->namebuf, "rx csum offload - IP");
1904 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1905 stats->namebuf, "rx csum offload - L4");
1906 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1907 stats->namebuf, "rx csum offload - IP bad");
1908 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1909 stats->namebuf, "rx csum offload - L4 bad");
1910 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1911 stats->namebuf, "Interrupt conditions zero");
1912 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1913 stats->namebuf, "Legacy interrupts");
1914
1915 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1916 stats->namebuf, "CRC Errors");
1917 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1918 stats->namebuf, "Illegal Byte Errors");
1919 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1920 stats->namebuf, "Byte Errors");
1921 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1922 stats->namebuf, "MAC Short Packets Discarded");
1923 if (hw->mac.type >= ixgbe_mac_X550)
1924 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1925 stats->namebuf, "Bad SFD");
1926 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1927 stats->namebuf, "Total Packets Missed");
1928 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1929 stats->namebuf, "MAC Local Faults");
1930 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1931 stats->namebuf, "MAC Remote Faults");
1932 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "Receive Length Errors");
1934 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "Link XON Transmitted");
1936 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "Link XON Received");
1938 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1939 stats->namebuf, "Link XOFF Transmitted");
1940 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "Link XOFF Received");
1942
1943 /* Packet Reception Stats */
1944 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "Total Octets Received");
1946 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "Good Octets Received");
1948 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "Total Packets Received");
1950 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Good Packets Received");
1952 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Multicast Packets Received");
1954 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Broadcast Packets Received");
1956 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "64 byte frames received ");
1958 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "65-127 byte frames received");
1960 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "128-255 byte frames received");
1962 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "256-511 byte frames received");
1964 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "512-1023 byte frames received");
1966 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "1023-1522 byte frames received");
1968 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "Receive Undersized");
1970 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "Fragmented Packets Received ");
1972 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "Oversized Packets Received");
1974 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Received Jabber");
1976 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Management Packets Received");
1978 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Management Packets Dropped");
1980 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1981 stats->namebuf, "Checksum Errors");
1982
1983 /* Packet Transmission Stats */
1984 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Good Octets Transmitted");
1986 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Total Packets Transmitted");
1988 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Good Packets Transmitted");
1990 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Broadcast Packets Transmitted");
1992 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Multicast Packets Transmitted");
1994 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "Management Packets Transmitted");
1996 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "64 byte frames transmitted ");
1998 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "65-127 byte frames transmitted");
2000 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "128-255 byte frames transmitted");
2002 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "256-511 byte frames transmitted");
2004 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "512-1023 byte frames transmitted");
2006 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "1024-1522 byte frames transmitted");
2008 } /* ixgbe_add_hw_stats */
2009
2010 static void
2011 ixgbe_clear_evcnt(struct adapter *adapter)
2012 {
2013 struct tx_ring *txr = adapter->tx_rings;
2014 struct rx_ring *rxr = adapter->rx_rings;
2015 struct ixgbe_hw *hw = &adapter->hw;
2016 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2017
2018 adapter->efbig_tx_dma_setup.ev_count = 0;
2019 adapter->mbuf_defrag_failed.ev_count = 0;
2020 adapter->efbig2_tx_dma_setup.ev_count = 0;
2021 adapter->einval_tx_dma_setup.ev_count = 0;
2022 adapter->other_tx_dma_setup.ev_count = 0;
2023 adapter->eagain_tx_dma_setup.ev_count = 0;
2024 adapter->enomem_tx_dma_setup.ev_count = 0;
2025 adapter->tso_err.ev_count = 0;
2026 adapter->watchdog_events.ev_count = 0;
2027 adapter->link_irq.ev_count = 0;
2028 adapter->link_sicount.ev_count = 0;
2029 adapter->mod_sicount.ev_count = 0;
2030 adapter->msf_sicount.ev_count = 0;
2031 adapter->phy_sicount.ev_count = 0;
2032
2033 txr = adapter->tx_rings;
2034 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2035 adapter->queues[i].irqs.ev_count = 0;
2036 adapter->queues[i].handleq.ev_count = 0;
2037 adapter->queues[i].req.ev_count = 0;
2038 txr->no_desc_avail.ev_count = 0;
2039 txr->total_packets.ev_count = 0;
2040 txr->tso_tx.ev_count = 0;
2041 #ifndef IXGBE_LEGACY_TX
2042 txr->pcq_drops.ev_count = 0;
2043 #endif
2044 txr->q_efbig_tx_dma_setup = 0;
2045 txr->q_mbuf_defrag_failed = 0;
2046 txr->q_efbig2_tx_dma_setup = 0;
2047 txr->q_einval_tx_dma_setup = 0;
2048 txr->q_other_tx_dma_setup = 0;
2049 txr->q_eagain_tx_dma_setup = 0;
2050 txr->q_enomem_tx_dma_setup = 0;
2051 txr->q_tso_err = 0;
2052
2053 if (i < __arraycount(stats->mpc)) {
2054 stats->mpc[i].ev_count = 0;
2055 if (hw->mac.type == ixgbe_mac_82598EB)
2056 stats->rnbc[i].ev_count = 0;
2057 }
2058 if (i < __arraycount(stats->pxontxc)) {
2059 stats->pxontxc[i].ev_count = 0;
2060 stats->pxonrxc[i].ev_count = 0;
2061 stats->pxofftxc[i].ev_count = 0;
2062 stats->pxoffrxc[i].ev_count = 0;
2063 if (hw->mac.type >= ixgbe_mac_82599EB)
2064 stats->pxon2offc[i].ev_count = 0;
2065 }
2066 if (i < __arraycount(stats->qprc)) {
2067 stats->qprc[i].ev_count = 0;
2068 stats->qptc[i].ev_count = 0;
2069 stats->qbrc[i].ev_count = 0;
2070 stats->qbtc[i].ev_count = 0;
2071 if (hw->mac.type >= ixgbe_mac_82599EB)
2072 stats->qprdc[i].ev_count = 0;
2073 }
2074
2075 rxr->rx_packets.ev_count = 0;
2076 rxr->rx_bytes.ev_count = 0;
2077 rxr->rx_copies.ev_count = 0;
2078 rxr->no_jmbuf.ev_count = 0;
2079 rxr->rx_discarded.ev_count = 0;
2080 }
2081 stats->ipcs.ev_count = 0;
2082 stats->l4cs.ev_count = 0;
2083 stats->ipcs_bad.ev_count = 0;
2084 stats->l4cs_bad.ev_count = 0;
2085 stats->intzero.ev_count = 0;
2086 stats->legint.ev_count = 0;
2087 stats->crcerrs.ev_count = 0;
2088 stats->illerrc.ev_count = 0;
2089 stats->errbc.ev_count = 0;
2090 stats->mspdc.ev_count = 0;
2091 stats->mbsdc.ev_count = 0;
2092 stats->mpctotal.ev_count = 0;
2093 stats->mlfc.ev_count = 0;
2094 stats->mrfc.ev_count = 0;
2095 stats->rlec.ev_count = 0;
2096 stats->lxontxc.ev_count = 0;
2097 stats->lxonrxc.ev_count = 0;
2098 stats->lxofftxc.ev_count = 0;
2099 stats->lxoffrxc.ev_count = 0;
2100
2101 /* Packet Reception Stats */
2102 stats->tor.ev_count = 0;
2103 stats->gorc.ev_count = 0;
2104 stats->tpr.ev_count = 0;
2105 stats->gprc.ev_count = 0;
2106 stats->mprc.ev_count = 0;
2107 stats->bprc.ev_count = 0;
2108 stats->prc64.ev_count = 0;
2109 stats->prc127.ev_count = 0;
2110 stats->prc255.ev_count = 0;
2111 stats->prc511.ev_count = 0;
2112 stats->prc1023.ev_count = 0;
2113 stats->prc1522.ev_count = 0;
2114 stats->ruc.ev_count = 0;
2115 stats->rfc.ev_count = 0;
2116 stats->roc.ev_count = 0;
2117 stats->rjc.ev_count = 0;
2118 stats->mngprc.ev_count = 0;
2119 stats->mngpdc.ev_count = 0;
2120 stats->xec.ev_count = 0;
2121
2122 /* Packet Transmission Stats */
2123 stats->gotc.ev_count = 0;
2124 stats->tpt.ev_count = 0;
2125 stats->gptc.ev_count = 0;
2126 stats->bptc.ev_count = 0;
2127 stats->mptc.ev_count = 0;
2128 stats->mngptc.ev_count = 0;
2129 stats->ptc64.ev_count = 0;
2130 stats->ptc127.ev_count = 0;
2131 stats->ptc255.ev_count = 0;
2132 stats->ptc511.ev_count = 0;
2133 stats->ptc1023.ev_count = 0;
2134 stats->ptc1522.ev_count = 0;
2135 }
2136
2137 /************************************************************************
2138 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2139 *
2140 * Retrieves the TDH value from the hardware
2141 ************************************************************************/
2142 static int
2143 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2144 {
2145 struct sysctlnode node = *rnode;
2146 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2147 uint32_t val;
2148
2149 if (!txr)
2150 return (0);
2151
2152 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2153 node.sysctl_data = &val;
2154 return sysctl_lookup(SYSCTLFN_CALL(&node));
2155 } /* ixgbe_sysctl_tdh_handler */
2156
2157 /************************************************************************
2158 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2159 *
2160 * Retrieves the TDT value from the hardware
2161 ************************************************************************/
2162 static int
2163 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2164 {
2165 struct sysctlnode node = *rnode;
2166 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2167 uint32_t val;
2168
2169 if (!txr)
2170 return (0);
2171
2172 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2173 node.sysctl_data = &val;
2174 return sysctl_lookup(SYSCTLFN_CALL(&node));
2175 } /* ixgbe_sysctl_tdt_handler */
2176
2177 /************************************************************************
2178 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2179 * handler function
2180 *
2181 * Retrieves the next_to_check value
2182 ************************************************************************/
2183 static int
2184 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2185 {
2186 struct sysctlnode node = *rnode;
2187 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2188 uint32_t val;
2189
2190 if (!rxr)
2191 return (0);
2192
2193 val = rxr->next_to_check;
2194 node.sysctl_data = &val;
2195 return sysctl_lookup(SYSCTLFN_CALL(&node));
2196 } /* ixgbe_sysctl_next_to_check_handler */
2197
2198 /************************************************************************
2199 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2200 *
2201 * Retrieves the RDH value from the hardware
2202 ************************************************************************/
2203 static int
2204 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2205 {
2206 struct sysctlnode node = *rnode;
2207 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2208 uint32_t val;
2209
2210 if (!rxr)
2211 return (0);
2212
2213 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2214 node.sysctl_data = &val;
2215 return sysctl_lookup(SYSCTLFN_CALL(&node));
2216 } /* ixgbe_sysctl_rdh_handler */
2217
2218 /************************************************************************
2219 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2220 *
2221 * Retrieves the RDT value from the hardware
2222 ************************************************************************/
2223 static int
2224 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2225 {
2226 struct sysctlnode node = *rnode;
2227 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2228 uint32_t val;
2229
2230 if (!rxr)
2231 return (0);
2232
2233 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2234 node.sysctl_data = &val;
2235 return sysctl_lookup(SYSCTLFN_CALL(&node));
2236 } /* ixgbe_sysctl_rdt_handler */
2237
2238 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2239 /************************************************************************
2240 * ixgbe_register_vlan
2241 *
2242 * Run via vlan config EVENT, it enables us to use the
2243 * HW Filter table since we can get the vlan id. This
2244 * just creates the entry in the soft version of the
2245 * VFTA, init will repopulate the real table.
2246 ************************************************************************/
2247 static void
2248 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2249 {
2250 struct adapter *adapter = ifp->if_softc;
2251 u16 index, bit;
2252
2253 if (ifp->if_softc != arg) /* Not our event */
2254 return;
2255
2256 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2257 return;
2258
2259 IXGBE_CORE_LOCK(adapter);
2260 index = (vtag >> 5) & 0x7F;
2261 bit = vtag & 0x1F;
2262 adapter->shadow_vfta[index] |= (1 << bit);
2263 ixgbe_setup_vlan_hw_support(adapter);
2264 IXGBE_CORE_UNLOCK(adapter);
2265 } /* ixgbe_register_vlan */
2266
2267 /************************************************************************
2268 * ixgbe_unregister_vlan
2269 *
2270 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2271 ************************************************************************/
2272 static void
2273 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2274 {
2275 struct adapter *adapter = ifp->if_softc;
2276 u16 index, bit;
2277
2278 if (ifp->if_softc != arg)
2279 return;
2280
2281 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2282 return;
2283
2284 IXGBE_CORE_LOCK(adapter);
2285 index = (vtag >> 5) & 0x7F;
2286 bit = vtag & 0x1F;
2287 adapter->shadow_vfta[index] &= ~(1 << bit);
2288 /* Re-init to load the changes */
2289 ixgbe_setup_vlan_hw_support(adapter);
2290 IXGBE_CORE_UNLOCK(adapter);
2291 } /* ixgbe_unregister_vlan */
2292 #endif
2293
2294 static void
2295 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2296 {
2297 struct ethercom *ec = &adapter->osdep.ec;
2298 struct ixgbe_hw *hw = &adapter->hw;
2299 struct rx_ring *rxr;
2300 int i;
2301 u32 ctrl;
2302
2303
2304 /*
2305 * We get here thru init_locked, meaning
2306 * a soft reset, this has already cleared
2307 * the VFTA and other state, so if there
2308 * have been no vlan's registered do nothing.
2309 */
2310 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2311 return;
2312
2313 /* Setup the queues for vlans */
2314 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2315 for (i = 0; i < adapter->num_queues; i++) {
2316 rxr = &adapter->rx_rings[i];
2317 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2318 if (hw->mac.type != ixgbe_mac_82598EB) {
2319 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2320 ctrl |= IXGBE_RXDCTL_VME;
2321 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2322 }
2323 rxr->vtag_strip = TRUE;
2324 }
2325 }
2326
2327 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2328 return;
2329 /*
2330 * A soft reset zero's out the VFTA, so
2331 * we need to repopulate it now.
2332 */
2333 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2334 if (adapter->shadow_vfta[i] != 0)
2335 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2336 adapter->shadow_vfta[i]);
2337
2338 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2339 /* Enable the Filter Table if enabled */
2340 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2341 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2342 ctrl |= IXGBE_VLNCTRL_VFE;
2343 }
2344 if (hw->mac.type == ixgbe_mac_82598EB)
2345 ctrl |= IXGBE_VLNCTRL_VME;
2346 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2347 } /* ixgbe_setup_vlan_hw_support */
2348
2349 /************************************************************************
2350 * ixgbe_get_slot_info
2351 *
2352 * Get the width and transaction speed of
2353 * the slot this adapter is plugged into.
2354 ************************************************************************/
2355 static void
2356 ixgbe_get_slot_info(struct adapter *adapter)
2357 {
2358 device_t dev = adapter->dev;
2359 struct ixgbe_hw *hw = &adapter->hw;
2360 u32 offset;
2361 u16 link;
2362 int bus_info_valid = TRUE;
2363
2364 /* Some devices are behind an internal bridge */
2365 switch (hw->device_id) {
2366 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2367 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2368 goto get_parent_info;
2369 default:
2370 break;
2371 }
2372
2373 ixgbe_get_bus_info(hw);
2374
2375 /*
2376 * Some devices don't use PCI-E, but there is no need
2377 * to display "Unknown" for bus speed and width.
2378 */
2379 switch (hw->mac.type) {
2380 case ixgbe_mac_X550EM_x:
2381 case ixgbe_mac_X550EM_a:
2382 return;
2383 default:
2384 goto display;
2385 }
2386
2387 get_parent_info:
2388 /*
2389 * For the Quad port adapter we need to parse back
2390 * up the PCI tree to find the speed of the expansion
2391 * slot into which this adapter is plugged. A bit more work.
2392 */
2393 dev = device_parent(device_parent(dev));
2394 #if 0
2395 #ifdef IXGBE_DEBUG
2396 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2397 pci_get_slot(dev), pci_get_function(dev));
2398 #endif
2399 dev = device_parent(device_parent(dev));
2400 #ifdef IXGBE_DEBUG
2401 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2402 pci_get_slot(dev), pci_get_function(dev));
2403 #endif
2404 #endif
2405 /* Now get the PCI Express Capabilities offset */
2406 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2407 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2408 /*
2409 * Hmm...can't get PCI-Express capabilities.
2410 * Falling back to default method.
2411 */
2412 bus_info_valid = FALSE;
2413 ixgbe_get_bus_info(hw);
2414 goto display;
2415 }
2416 /* ...and read the Link Status Register */
2417 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2418 offset + PCIE_LCSR) >> 16;
2419 ixgbe_set_pci_config_data_generic(hw, link);
2420
2421 display:
2422 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2423 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2424 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2425 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2426 "Unknown"),
2427 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2428 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2429 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2430 "Unknown"));
2431
2432 if (bus_info_valid) {
2433 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2434 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2435 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2436 device_printf(dev, "PCI-Express bandwidth available"
2437 " for this card\n is not sufficient for"
2438 " optimal performance.\n");
2439 device_printf(dev, "For optimal performance a x8 "
2440 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2441 }
2442 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2443 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2444 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2445 device_printf(dev, "PCI-Express bandwidth available"
2446 " for this card\n is not sufficient for"
2447 " optimal performance.\n");
2448 device_printf(dev, "For optimal performance a x8 "
2449 "PCIE Gen3 slot is required.\n");
2450 }
2451 } else
2452 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2453
2454 return;
2455 } /* ixgbe_get_slot_info */
2456
2457 /************************************************************************
2458 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2459 ************************************************************************/
2460 static inline void
2461 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2462 {
2463 struct ixgbe_hw *hw = &adapter->hw;
2464 struct ix_queue *que = &adapter->queues[vector];
2465 u64 queue = (u64)(1ULL << vector);
2466 u32 mask;
2467
2468 mutex_enter(&que->dc_mtx);
2469 if (que->disabled_count > 0 && --que->disabled_count > 0)
2470 goto out;
2471
2472 if (hw->mac.type == ixgbe_mac_82598EB) {
2473 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2474 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2475 } else {
2476 mask = (queue & 0xFFFFFFFF);
2477 if (mask)
2478 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2479 mask = (queue >> 32);
2480 if (mask)
2481 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2482 }
2483 out:
2484 mutex_exit(&que->dc_mtx);
2485 } /* ixgbe_enable_queue */
2486
2487 /************************************************************************
2488 * ixgbe_disable_queue_internal
2489 ************************************************************************/
2490 static inline void
2491 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2492 {
2493 struct ixgbe_hw *hw = &adapter->hw;
2494 struct ix_queue *que = &adapter->queues[vector];
2495 u64 queue = (u64)(1ULL << vector);
2496 u32 mask;
2497
2498 mutex_enter(&que->dc_mtx);
2499
2500 if (que->disabled_count > 0) {
2501 if (nestok)
2502 que->disabled_count++;
2503 goto out;
2504 }
2505 que->disabled_count++;
2506
2507 if (hw->mac.type == ixgbe_mac_82598EB) {
2508 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2509 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2510 } else {
2511 mask = (queue & 0xFFFFFFFF);
2512 if (mask)
2513 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2514 mask = (queue >> 32);
2515 if (mask)
2516 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2517 }
2518 out:
2519 mutex_exit(&que->dc_mtx);
2520 } /* ixgbe_disable_queue_internal */
2521
2522 /************************************************************************
2523 * ixgbe_disable_queue
2524 ************************************************************************/
2525 static inline void
2526 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2527 {
2528
2529 ixgbe_disable_queue_internal(adapter, vector, true);
2530 } /* ixgbe_disable_queue */
2531
2532 /************************************************************************
2533 * ixgbe_sched_handle_que - schedule deferred packet processing
2534 ************************************************************************/
2535 static inline void
2536 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2537 {
2538
2539 if(que->txrx_use_workqueue) {
2540 /*
2541 * adapter->que_wq is bound to each CPU instead of
2542 * each NIC queue to reduce workqueue kthread. As we
2543 * should consider about interrupt affinity in this
2544 * function, the workqueue kthread must be WQ_PERCPU.
2545 * If create WQ_PERCPU workqueue kthread for each NIC
2546 * queue, that number of created workqueue kthread is
2547 * (number of used NIC queue) * (number of CPUs) =
2548 * (number of CPUs) ^ 2 most often.
2549 *
2550 * The same NIC queue's interrupts are avoided by
2551 * masking the queue's interrupt. And different
2552 * NIC queue's interrupts use different struct work
2553 * (que->wq_cookie). So, "enqueued flag" to avoid
2554 * twice workqueue_enqueue() is not required .
2555 */
2556 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2557 } else {
2558 softint_schedule(que->que_si);
2559 }
2560 }
2561
2562 /************************************************************************
2563 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2564 ************************************************************************/
2565 static int
2566 ixgbe_msix_que(void *arg)
2567 {
2568 struct ix_queue *que = arg;
2569 struct adapter *adapter = que->adapter;
2570 struct ifnet *ifp = adapter->ifp;
2571 struct tx_ring *txr = que->txr;
2572 struct rx_ring *rxr = que->rxr;
2573 bool more;
2574 u32 newitr = 0;
2575
2576 /* Protect against spurious interrupts */
2577 if ((ifp->if_flags & IFF_RUNNING) == 0)
2578 return 0;
2579
2580 ixgbe_disable_queue(adapter, que->msix);
2581 ++que->irqs.ev_count;
2582
2583 /*
2584 * Don't change "que->txrx_use_workqueue" from this point to avoid
2585 * flip-flopping softint/workqueue mode in one deferred processing.
2586 */
2587 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2588
2589 #ifdef __NetBSD__
2590 /* Don't run ixgbe_rxeof in interrupt context */
2591 more = true;
2592 #else
2593 more = ixgbe_rxeof(que);
2594 #endif
2595
2596 IXGBE_TX_LOCK(txr);
2597 ixgbe_txeof(txr);
2598 IXGBE_TX_UNLOCK(txr);
2599
2600 /* Do AIM now? */
2601
2602 if (adapter->enable_aim == false)
2603 goto no_calc;
2604 /*
2605 * Do Adaptive Interrupt Moderation:
2606 * - Write out last calculated setting
2607 * - Calculate based on average size over
2608 * the last interval.
2609 */
2610 if (que->eitr_setting)
2611 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2612
2613 que->eitr_setting = 0;
2614
2615 /* Idle, do nothing */
2616 if ((txr->bytes == 0) && (rxr->bytes == 0))
2617 goto no_calc;
2618
2619 if ((txr->bytes) && (txr->packets))
2620 newitr = txr->bytes/txr->packets;
2621 if ((rxr->bytes) && (rxr->packets))
2622 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2623 newitr += 24; /* account for hardware frame, crc */
2624
2625 /* set an upper boundary */
2626 newitr = uimin(newitr, 3000);
2627
2628 /* Be nice to the mid range */
2629 if ((newitr > 300) && (newitr < 1200))
2630 newitr = (newitr / 3);
2631 else
2632 newitr = (newitr / 2);
2633
2634 /*
2635 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2636 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2637 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2638 * on 1G and higher.
2639 */
2640 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2641 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2642 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2643 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2644 }
2645
2646 /* save for next interrupt */
2647 que->eitr_setting = newitr;
2648
2649 /* Reset state */
2650 txr->bytes = 0;
2651 txr->packets = 0;
2652 rxr->bytes = 0;
2653 rxr->packets = 0;
2654
2655 no_calc:
2656 if (more)
2657 ixgbe_sched_handle_que(adapter, que);
2658 else
2659 ixgbe_enable_queue(adapter, que->msix);
2660
2661 return 1;
2662 } /* ixgbe_msix_que */
2663
2664 /************************************************************************
2665 * ixgbe_media_status - Media Ioctl callback
2666 *
2667 * Called whenever the user queries the status of
2668 * the interface using ifconfig.
2669 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and return early. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the (physical layer, link speed) pair to an ifmedia subtype.
	 * Several layer bits may be set at once; each block below handles
	 * one family of media types.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Until then, backplane KR/KX4/KX links are
	 * reported with the closest existing subtype.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Derive if_baudrate from the reported media word. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2819
2820 /************************************************************************
2821 * ixgbe_media_change - Media Ioctl callback
2822 *
2823 * Called when the user changes speed/duplex using
2824 * media/mediopt option with ifconfig.
2825 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; changing it is not permitted. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise every speed the hardware reports it can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selection in adapter->advertise using the same bit
	 * encoding as the "advertise_speed" sysctl:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G,
	 * 0x20 = 5G.  Zero means autonegotiate.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
2928
2929 /************************************************************************
2930 * ixgbe_set_promisc
2931 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start with unicast promiscuous mode cleared. */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count joined multicast groups, up to the table limit. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* MPE only stays set when the multicast filter table overflows. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* If requested, turn promisc bits back on with a second write. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2971
2972 /************************************************************************
2973 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2974 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: mask LSC and defer to the link softint. */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* The remaining causes below do not exist on 82598. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			/*
			 * NOTE(review): this early return skips the EIMS
			 * re-enable at the bottom; presumably a previously
			 * scheduled fdir softint restores it -- confirm.
			 */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via GPI SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the dedicated TS bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599: multispeed fiber link fault */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3096
3097 static void
3098 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3099 {
3100
3101 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3102 itr |= itr << 16;
3103 else
3104 itr |= IXGBE_EITR_CNT_WDIS;
3105
3106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3107 }
3108
3109
3110 /************************************************************************
3111 * ixgbe_sysctl_interrupt_rate_handler
3112 ************************************************************************/
3113 static int
3114 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3115 {
3116 struct sysctlnode node = *rnode;
3117 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3118 struct adapter *adapter = que->adapter;
3119 uint32_t reg, usec, rate;
3120 int error;
3121
3122 if (que == NULL)
3123 return 0;
3124 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3125 usec = ((reg & 0x0FF8) >> 3);
3126 if (usec > 0)
3127 rate = 500000 / usec;
3128 else
3129 rate = 0;
3130 node.sysctl_data = &rate;
3131 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3132 if (error || newp == NULL)
3133 return error;
3134 reg &= ~0xfff; /* default, no limitation */
3135 if (rate > 0 && rate < 500000) {
3136 if (rate < 1000)
3137 rate = 1000;
3138 reg |= ((4000000/rate) & 0xff8);
3139 /*
3140 * When RSC is used, ITR interval must be larger than
3141 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3142 * The minimum value is always greater than 2us on 100M
3143 * (and 10M?(not documented)), but it's not on 1G and higher.
3144 */
3145 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3146 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3147 if ((adapter->num_queues > 1)
3148 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3149 return EINVAL;
3150 }
3151 ixgbe_max_interrupt_rate = rate;
3152 } else
3153 ixgbe_max_interrupt_rate = 0;
3154 ixgbe_eitr_write(adapter, que->msix, reg);
3155
3156 return (0);
3157 } /* ixgbe_sysctl_interrupt_rate_handler */
3158
3159 const struct sysctlnode *
3160 ixgbe_sysctl_instance(struct adapter *adapter)
3161 {
3162 const char *dvname;
3163 struct sysctllog **log;
3164 int rc;
3165 const struct sysctlnode *rnode;
3166
3167 if (adapter->sysctltop != NULL)
3168 return adapter->sysctltop;
3169
3170 log = &adapter->sysctllog;
3171 dvname = device_xname(adapter->dev);
3172
3173 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3174 0, CTLTYPE_NODE, dvname,
3175 SYSCTL_DESCR("ixgbe information and settings"),
3176 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3177 goto err;
3178
3179 return rnode;
3180 err:
3181 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3182 return NULL;
3183 }
3184
3185 /************************************************************************
3186 * ixgbe_add_device_sysctls
3187 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root (hw.<devname>). */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* AIM (adaptive interrupt moderation) toggle, seeded from loader knob */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" were changed directly in the
	 * sysctl handler, it could cause flip-flopping between softint and
	 * workqueue mode within one deferred processing run.  Preventing
	 * that in ixgbe_sched_handle_que() would require
	 * preempt_disable()/preempt_enable() around it to satisfy the
	 * KASSERT in softint_schedule().  Copying the adapter-wide flag
	 * into "que->txrx_use_workqueue" from the interrupt handler is
	 * lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que() call.
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External PHY sysctls live under their own "phy" subtree. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* X550EM_a with firmware-managed PHY */
	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3341
3342 /************************************************************************
3343 * ixgbe_allocate_pci_resources
3344 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0, which holds the device register space. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Register accesses must not be prefetched. */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/*
			 * Shared error path: reached by goto from the
			 * pci_mapreg_info() failure above as well as by
			 * falling into this bus_space_map() failure block.
			 */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3382
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints (and deferred-Tx softints when not legacy Tx) */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	/* Workqueues used when txrx_use_workqueue mode was enabled */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	/*
	 * NOTE(review): FDIR is gated on feat_en while SRIOV below uses
	 * feat_cap -- confirm this asymmetry matches how the softints
	 * were established.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
3435
3436 /************************************************************************
3437 * ixgbe_detach - Device removal routine
3438 *
3439 * Called when the driver is being removed.
3440 * Stops the adapter and deallocates all the resources
3441 * that were allocated for driver operation.
3442 *
3443 * return 0 on success, positive on failure
3444 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down softints and workqueues before freeing resources. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach all event counters registered at attach time. */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue / per-ring counters */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		/* Some stats arrays are smaller than num_queues. */
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Free descriptor rings, queue state, and multicast table. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3630
3631 /************************************************************************
3632 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3633 *
3634 * Prepare the adapter/port for LPLU and/or WoL
3635 ************************************************************************/
3636 static int
3637 ixgbe_setup_low_power_mode(struct adapter *adapter)
3638 {
3639 struct ixgbe_hw *hw = &adapter->hw;
3640 device_t dev = adapter->dev;
3641 s32 error = 0;
3642
3643 KASSERT(mutex_owned(&adapter->core_mtx));
3644
3645 /* Limit power management flow to X550EM baseT */
3646 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3647 hw->phy.ops.enter_lplu) {
3648 /* X550EM baseT adapters need a special LPLU flow */
3649 hw->phy.reset_disable = true;
3650 ixgbe_stop(adapter);
3651 error = hw->phy.ops.enter_lplu(hw);
3652 if (error)
3653 device_printf(dev,
3654 "Error entering LPLU: %d\n", error);
3655 hw->phy.reset_disable = false;
3656 } else {
3657 /* Just stop for other adapters */
3658 ixgbe_stop(adapter);
3659 }
3660
3661 if (!hw->wol_enabled) {
3662 ixgbe_set_phy_power(hw, FALSE);
3663 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3664 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3665 } else {
3666 /* Turn off support for APM wakeup. (Using ACPI instead) */
3667 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3668 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3669
3670 /*
3671 * Clear Wake Up Status register to prevent any previous wakeup
3672 * events from waking us up immediately after we suspend.
3673 */
3674 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3675
3676 /*
3677 * Program the Wakeup Filter Control register with user filter
3678 * settings
3679 */
3680 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3681
3682 /* Enable wakeups and power management in Wakeup Control */
3683 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3684 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3685
3686 }
3687
3688 return error;
3689 } /* ixgbe_setup_low_power_mode */
3690
3691 /************************************************************************
3692 * ixgbe_shutdown - Shutdown entry point
3693 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * Shutdown entry point: put the port into its low-power state at
 * system shutdown.  Currently compiled out on NetBSD; the pmf(9)
 * suspend hook (ixgbe_suspend) performs the same transition.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* The low-power transition requires the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3710
3711 /************************************************************************
3712 * ixgbe_suspend
3713 *
3714 * From D0 to D3
3715 ************************************************************************/
3716 static bool
3717 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3718 {
3719 struct adapter *adapter = device_private(dev);
3720 int error = 0;
3721
3722 INIT_DEBUGOUT("ixgbe_suspend: begin");
3723
3724 IXGBE_CORE_LOCK(adapter);
3725
3726 error = ixgbe_setup_low_power_mode(adapter);
3727
3728 IXGBE_CORE_UNLOCK(adapter);
3729
3730 return (error);
3731 } /* ixgbe_suspend */
3732
3733 /************************************************************************
3734 * ixgbe_resume
3735 *
3736 * From D3 to D0
3737 ************************************************************************/
3738 static bool
3739 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3740 {
3741 struct adapter *adapter = device_private(dev);
3742 struct ifnet *ifp = adapter->ifp;
3743 struct ixgbe_hw *hw = &adapter->hw;
3744 u32 wus;
3745
3746 INIT_DEBUGOUT("ixgbe_resume: begin");
3747
3748 IXGBE_CORE_LOCK(adapter);
3749
3750 /* Read & clear WUS register */
3751 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3752 if (wus)
3753 device_printf(dev, "Woken up by (WUS): %#010x\n",
3754 IXGBE_READ_REG(hw, IXGBE_WUS));
3755 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3756 /* And clear WUFC until next low-power transition */
3757 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3758
3759 /*
3760 * Required after D3->D0 transition;
3761 * will re-advertise all previous advertised speeds
3762 */
3763 if (ifp->if_flags & IFF_UP)
3764 ixgbe_init_locked(adapter);
3765
3766 IXGBE_CORE_UNLOCK(adapter);
3767
3768 return true;
3769 } /* ixgbe_resume */
3770
3771 /*
3772 * Set the various hardware offload abilities.
3773 *
3774 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3775 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3776 * mbuf offload flags the driver will understand.
3777 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty placeholder: nothing is derived from
	 * the ifnet capability flags here yet (see the comment above the
	 * function).  Kept so ixgbe_init_locked() has a single hook for
	 * future offload-flag propagation.
	 */
}
3783
3784 /************************************************************************
3785 * ixgbe_init_locked - Init entry point
3786 *
3787 * Used in two ways: It is used by the stack as an init
3788 * entry point in network interface structure. It is also
3789 * used by the driver as a hw/sw initialization routine to
3790 * get to a consistent state.
3791 *
3792 * return 0 on success, positive on failure
3793 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	int		i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/*
	 * Quiesce the hardware and the periodic timer before
	 * reprogramming anything; also reset the per-queue
	 * interrupt-disable bookkeeping.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to ~10ms) for the queue-enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic link/watchdog timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			/* Bail out: interrupts are left disabled. */
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
4032
4033 /************************************************************************
4034 * ixgbe_init
4035 ************************************************************************/
4036 static int
4037 ixgbe_init(struct ifnet *ifp)
4038 {
4039 struct adapter *adapter = ifp->if_softc;
4040
4041 IXGBE_CORE_LOCK(adapter);
4042 ixgbe_init_locked(adapter);
4043 IXGBE_CORE_UNLOCK(adapter);
4044
4045 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4046 } /* ixgbe_init */
4047
4048 /************************************************************************
4049 * ixgbe_set_ivar
4050 *
4051 * Setup the correct IVAR register for a particular MSI-X interrupt
4052 * (yes this is all very magic and confusing :)
4053 * - entry is the register array entry
4054 * - vector is the MSI-X vector for this queue
4055 * - type is RX/TX/MISC
4056 ************************************************************************/
4057 static void
4058 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4059 {
4060 struct ixgbe_hw *hw = &adapter->hw;
4061 u32 ivar, index;
4062
4063 vector |= IXGBE_IVAR_ALLOC_VAL;
4064
4065 switch (hw->mac.type) {
4066 case ixgbe_mac_82598EB:
4067 if (type == -1)
4068 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4069 else
4070 entry += (type * 64);
4071 index = (entry >> 2) & 0x1F;
4072 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4073 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4074 ivar |= (vector << (8 * (entry & 0x3)));
4075 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4076 break;
4077 case ixgbe_mac_82599EB:
4078 case ixgbe_mac_X540:
4079 case ixgbe_mac_X550:
4080 case ixgbe_mac_X550EM_x:
4081 case ixgbe_mac_X550EM_a:
4082 if (type == -1) { /* MISC IVAR */
4083 index = (entry & 1) * 8;
4084 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4085 ivar &= ~(0xFF << index);
4086 ivar |= (vector << index);
4087 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4088 } else { /* RX/TX IVARS */
4089 index = (16 * (entry & 1)) + (8 * type);
4090 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4091 ivar &= ~(0xFF << index);
4092 ivar |= (vector << index);
4093 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4094 }
4095 break;
4096 default:
4097 break;
4098 }
4099 } /* ixgbe_set_ivar */
4100
4101 /************************************************************************
4102 * ixgbe_configure_ivars
4103 ************************************************************************/
4104 static void
4105 ixgbe_configure_ivars(struct adapter *adapter)
4106 {
4107 struct ix_queue *que = adapter->queues;
4108 u32 newitr;
4109
4110 if (ixgbe_max_interrupt_rate > 0)
4111 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4112 else {
4113 /*
4114 * Disable DMA coalescing if interrupt moderation is
4115 * disabled.
4116 */
4117 adapter->dmac = 0;
4118 newitr = 0;
4119 }
4120
4121 for (int i = 0; i < adapter->num_queues; i++, que++) {
4122 struct rx_ring *rxr = &adapter->rx_rings[i];
4123 struct tx_ring *txr = &adapter->tx_rings[i];
4124 /* First the RX queue entry */
4125 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4126 /* ... and the TX */
4127 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4128 /* Set an Initial EITR value */
4129 ixgbe_eitr_write(adapter, que->msix, newitr);
4130 /*
4131 * To eliminate influence of the previous state.
4132 * At this point, Tx/Rx interrupt handler
4133 * (ixgbe_msix_que()) cannot be called, so both
4134 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4135 */
4136 que->eitr_setting = 0;
4137 }
4138
4139 /* For the Link interrupt */
4140 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4141 } /* ixgbe_configure_ivars */
4142
4143 /************************************************************************
4144 * ixgbe_config_gpie
4145 ************************************************************************/
4146 static void
4147 ixgbe_config_gpie(struct adapter *adapter)
4148 {
4149 struct ixgbe_hw *hw = &adapter->hw;
4150 u32 gpie;
4151
4152 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4153
4154 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4155 /* Enable Enhanced MSI-X mode */
4156 gpie |= IXGBE_GPIE_MSIX_MODE
4157 | IXGBE_GPIE_EIAME
4158 | IXGBE_GPIE_PBA_SUPPORT
4159 | IXGBE_GPIE_OCD;
4160 }
4161
4162 /* Fan Failure Interrupt */
4163 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4164 gpie |= IXGBE_SDP1_GPIEN;
4165
4166 /* Thermal Sensor Interrupt */
4167 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4168 gpie |= IXGBE_SDP0_GPIEN_X540;
4169
4170 /* Link detection */
4171 switch (hw->mac.type) {
4172 case ixgbe_mac_82599EB:
4173 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4174 break;
4175 case ixgbe_mac_X550EM_x:
4176 case ixgbe_mac_X550EM_a:
4177 gpie |= IXGBE_SDP0_GPIEN_X540;
4178 break;
4179 default:
4180 break;
4181 }
4182
4183 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4184
4185 } /* ixgbe_config_gpie */
4186
4187 /************************************************************************
4188 * ixgbe_config_delay_values
4189 *
4190 * Requires adapter->max_frame_size to be set.
4191 ************************************************************************/
4192 static void
4193 ixgbe_config_delay_values(struct adapter *adapter)
4194 {
4195 struct ixgbe_hw *hw = &adapter->hw;
4196 u32 rxpb, frame, size, tmp;
4197
4198 frame = adapter->max_frame_size;
4199
4200 /* Calculate High Water */
4201 switch (hw->mac.type) {
4202 case ixgbe_mac_X540:
4203 case ixgbe_mac_X550:
4204 case ixgbe_mac_X550EM_x:
4205 case ixgbe_mac_X550EM_a:
4206 tmp = IXGBE_DV_X540(frame, frame);
4207 break;
4208 default:
4209 tmp = IXGBE_DV(frame, frame);
4210 break;
4211 }
4212 size = IXGBE_BT2KB(tmp);
4213 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4214 hw->fc.high_water[0] = rxpb - size;
4215
4216 /* Now calculate Low Water */
4217 switch (hw->mac.type) {
4218 case ixgbe_mac_X540:
4219 case ixgbe_mac_X550:
4220 case ixgbe_mac_X550EM_x:
4221 case ixgbe_mac_X550EM_a:
4222 tmp = IXGBE_LOW_DV_X540(frame);
4223 break;
4224 default:
4225 tmp = IXGBE_LOW_DV(frame);
4226 break;
4227 }
4228 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4229
4230 hw->fc.pause_time = IXGBE_FC_PAUSE;
4231 hw->fc.send_xon = TRUE;
4232 } /* ixgbe_config_delay_values */
4233
4234 /************************************************************************
4235 * ixgbe_set_multi - Multicast Update
4236 *
4237 * Called whenever multicast address list is updated.
4238 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the multicast shadow table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI when the table overflows or when
		 * a range (addrlo != addrhi) is requested, since ranges
		 * cannot be expressed in the MTA.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Translate promisc/allmulti state into FCTRL bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only program exact filters when the table did not overflow. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
4292
4293 /************************************************************************
4294 * ixgbe_mc_array_itr
4295 *
4296 * An iterator function needed by the multicast shared code.
4297 * It feeds the shared code routine the addresses in the
4298 * array of ixgbe_set_multi() one by one.
4299 ************************************************************************/
4300 static u8 *
4301 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4302 {
4303 struct ixgbe_mc_addr *mta;
4304
4305 mta = (struct ixgbe_mc_addr *)*update_ptr;
4306 *vmdq = mta->vmdq;
4307
4308 *update_ptr = (u8*)(mta + 1);
4309
4310 return (mta->addr);
4311 } /* ixgbe_mc_array_itr */
4312
4313 /************************************************************************
4314 * ixgbe_local_timer - Timer routine
4315 *
4316 * Checks for link status, updates statistics,
4317 * and runs the watchdog check.
4318 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	/* Callout entry point: take the core lock and run the body. */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_local_timer1(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
4328
/*
 * Body of the 1 Hz timer (runs with core_mtx held): refreshes link
 * state and statistics, aggregates per-queue TX error counters into
 * the adapter-wide event counters, and performs the hung-queue
 * watchdog check.  Reschedules itself via callout_reset() unless the
 * watchdog fires, in which case the interface is reinitialized.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4433
4434 /************************************************************************
4435 * ixgbe_sfp_probe
4436 *
4437 * Determine if a port had optics inserted.
4438 ************************************************************************/
4439 static bool
4440 ixgbe_sfp_probe(struct adapter *adapter)
4441 {
4442 struct ixgbe_hw *hw = &adapter->hw;
4443 device_t dev = adapter->dev;
4444 bool result = FALSE;
4445
4446 if ((hw->phy.type == ixgbe_phy_nl) &&
4447 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4448 s32 ret = hw->phy.ops.identify_sfp(hw);
4449 if (ret)
4450 goto out;
4451 ret = hw->phy.ops.reset(hw);
4452 adapter->sfp_probe = FALSE;
4453 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4454 device_printf(dev,"Unsupported SFP+ module detected!");
4455 device_printf(dev,
4456 "Reload driver with supported module.\n");
4457 goto out;
4458 } else
4459 device_printf(dev, "SFP+ module detected!\n");
4460 /* We now have supported optics */
4461 result = TRUE;
4462 }
4463 out:
4464
4465 return (result);
4466 } /* ixgbe_sfp_probe */
4467
4468 /************************************************************************
4469 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4470 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * With the crosstalk fix, SDP noise can fake module-insertion
	 * interrupts; check the cage-present pin (which SDP depends on
	 * the MAC) and bail out if no module is actually seated.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 uses a PHY reset; newer MACs reconfigure via setup_sfp. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Chain into the multispeed-fiber softint to set up the link. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
4518
4519
4520 /************************************************************************
4521 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4522 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If nothing is explicitly advertised, fall back to whatever
	 * the hardware reports as its link capabilities.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4548
4549 /************************************************************************
4550 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4551 ************************************************************************/
4552 static void
4553 ixgbe_handle_phy(void *context)
4554 {
4555 struct adapter *adapter = context;
4556 struct ixgbe_hw *hw = &adapter->hw;
4557 int error;
4558
4559 ++adapter->phy_sicount.ev_count;
4560 error = hw->phy.ops.handle_lasi(hw);
4561 if (error == IXGBE_ERR_OVERTEMP)
4562 device_printf(adapter->dev,
4563 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4564 " PHY will downshift to lower power state!\n");
4565 else if (error)
4566 device_printf(adapter->dev,
4567 "Error handling LASI interrupt: %d\n", error);
4568 } /* ixgbe_handle_phy */
4569
4570 static void
4571 ixgbe_ifstop(struct ifnet *ifp, int disable)
4572 {
4573 struct adapter *adapter = ifp->if_softc;
4574
4575 IXGBE_CORE_LOCK(adapter);
4576 ixgbe_stop(adapter);
4577 IXGBE_CORE_UNLOCK(adapter);
4578 }
4579
4580 /************************************************************************
4581 * ixgbe_stop - Stop the hardware
4582 *
4583 * Disables all traffic on the adapter by issuing a
4584 * global reset on the MAC and deallocates TX/RX buffers.
4585 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling stop_adapter, presumably
	 * so the shared code runs its full stop sequence even after the
	 * reset above -- the flag otherwise short-circuits it.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4621
4622 /************************************************************************
4623 * ixgbe_update_link_status - Update OS on link state
4624 *
4625 * Note: Only updates the OS on the cached link state.
4626 * The real check of the hardware only happens with
4627 * a link interrupt.
4628 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Cached link state is protected by the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition. */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				/* Reads clear the counters as a side effect. */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				/* Translate the link speed bit to text. */
				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			/* Tell the network stack the link is up. */
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition. */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued TX work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4710
4711 /************************************************************************
4712 * ixgbe_config_dmac - Configure DMA Coalescing
4713 ************************************************************************/
4714 static void
4715 ixgbe_config_dmac(struct adapter *adapter)
4716 {
4717 struct ixgbe_hw *hw = &adapter->hw;
4718 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4719
4720 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4721 return;
4722
4723 if (dcfg->watchdog_timer ^ adapter->dmac ||
4724 dcfg->link_speed ^ adapter->link_speed) {
4725 dcfg->watchdog_timer = adapter->dmac;
4726 dcfg->fcoe_en = false;
4727 dcfg->link_speed = adapter->link_speed;
4728 dcfg->num_tcs = 1;
4729
4730 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4731 dcfg->watchdog_timer, dcfg->link_speed);
4732
4733 hw->mac.ops.dmac_config(hw);
4734 }
4735 } /* ixgbe_config_dmac */
4736
4737 /************************************************************************
4738 * ixgbe_enable_intr
4739 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific interrupt causes. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the selected causes. */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	/* Post all pending register writes to the hardware. */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4820
4821 /************************************************************************
4822 * ixgbe_disable_intr_internal
4823 ************************************************************************/
/*
 * Mask all interrupt sources.  'nestok' is forwarded to the per-queue
 * disable path (see ixgbe_disable_queue_internal()).
 */
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Turn off MSI-X auto-clear as well. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	/* Post all pending register writes to the hardware. */
	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4841
4842 /************************************************************************
4843 * ixgbe_disable_intr
4844 ************************************************************************/
/* Disable all interrupts; nested disables are allowed. */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
4851
4852 /************************************************************************
4853 * ixgbe_ensure_disabled_intr
4854 ************************************************************************/
/* Disable all interrupts; nesting is NOT expected on this path. */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
4861
4862 /************************************************************************
4863 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4864 ************************************************************************/
/*
 * Legacy (INTx) interrupt handler.  Returns 0 when the interrupt was
 * not ours (EICR read back zero), 1 otherwise.
 */
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also acknowledges the pending causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt (shared line); just re-enable and out. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		/* Re-arm the fan-failure cause. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: hand off to the mod softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multi-speed fiber events (82599 only). */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Defer remaining RX/TX work, or re-enable interrupts if done. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4955
4956 /************************************************************************
4957 * ixgbe_free_pciintr_resources
4958 ************************************************************************/
/* Disestablish and release all PCI interrupt resources. */
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally hand the interrupt vectors back to the bus layer. */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
4994
4995 /************************************************************************
4996 * ixgbe_free_pci_resources
4997 ************************************************************************/
4998 static void
4999 ixgbe_free_pci_resources(struct adapter *adapter)
5000 {
5001
5002 ixgbe_free_pciintr_resources(adapter);
5003
5004 if (adapter->osdep.mem_size != 0) {
5005 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5006 adapter->osdep.mem_bus_space_handle,
5007 adapter->osdep.mem_size);
5008 }
5009
5010 } /* ixgbe_free_pci_resources */
5011
5012 /************************************************************************
5013 * ixgbe_set_sysctl_value
5014 ************************************************************************/
5015 static void
5016 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5017 const char *description, int *limit, int value)
5018 {
5019 device_t dev = adapter->dev;
5020 struct sysctllog **log;
5021 const struct sysctlnode *rnode, *cnode;
5022
5023 log = &adapter->sysctllog;
5024 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5025 aprint_error_dev(dev, "could not create sysctl root\n");
5026 return;
5027 }
5028 if (sysctl_createv(log, 0, &rnode, &cnode,
5029 CTLFLAG_READWRITE, CTLTYPE_INT,
5030 name, SYSCTL_DESCR(description),
5031 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5032 aprint_error_dev(dev, "could not create sysctl\n");
5033 *limit = value;
5034 } /* ixgbe_set_sysctl_value */
5035
5036 /************************************************************************
5037 * ixgbe_sysctl_flowcntl
5038 *
5039 * SYSCTL wrapper around setting Flow Control
5040 ************************************************************************/
5041 static int
5042 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5043 {
5044 struct sysctlnode node = *rnode;
5045 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5046 int error, fc;
5047
5048 fc = adapter->hw.fc.current_mode;
5049 node.sysctl_data = &fc;
5050 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5051 if (error != 0 || newp == NULL)
5052 return error;
5053
5054 /* Don't bother if it's not changed */
5055 if (fc == adapter->hw.fc.current_mode)
5056 return (0);
5057
5058 return ixgbe_set_flowcntl(adapter, fc);
5059 } /* ixgbe_sysctl_flowcntl */
5060
5061 /************************************************************************
5062 * ixgbe_set_flowcntl - Set flow control
5063 *
5064 * Flow control values:
5065 * 0 - off
5066 * 1 - rx pause
5067 * 2 - tx pause
5068 * 3 - full
5069 ************************************************************************/
5070 static int
5071 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5072 {
5073 switch (fc) {
5074 case ixgbe_fc_rx_pause:
5075 case ixgbe_fc_tx_pause:
5076 case ixgbe_fc_full:
5077 adapter->hw.fc.requested_mode = fc;
5078 if (adapter->num_queues > 1)
5079 ixgbe_disable_rx_drop(adapter);
5080 break;
5081 case ixgbe_fc_none:
5082 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5083 if (adapter->num_queues > 1)
5084 ixgbe_enable_rx_drop(adapter);
5085 break;
5086 default:
5087 return (EINVAL);
5088 }
5089
5090 #if 0 /* XXX NetBSD */
5091 /* Don't autoneg if forcing a value */
5092 adapter->hw.fc.disable_fc_autoneg = TRUE;
5093 #endif
5094 ixgbe_fc_enable(&adapter->hw);
5095
5096 return (0);
5097 } /* ixgbe_set_flowcntl */
5098
5099 /************************************************************************
5100 * ixgbe_enable_rx_drop
5101 *
5102 * Enable the hardware to drop packets when the buffer is
5103 * full. This is useful with multiqueue, so that no single
5104 * queue being full stalls the entire RX engine. We only
5105 * enable this when Multiqueue is enabled AND Flow Control
5106 * is disabled.
5107 ************************************************************************/
5108 static void
5109 ixgbe_enable_rx_drop(struct adapter *adapter)
5110 {
5111 struct ixgbe_hw *hw = &adapter->hw;
5112 struct rx_ring *rxr;
5113 u32 srrctl;
5114
5115 for (int i = 0; i < adapter->num_queues; i++) {
5116 rxr = &adapter->rx_rings[i];
5117 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5118 srrctl |= IXGBE_SRRCTL_DROP_EN;
5119 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5120 }
5121
5122 /* enable drop for each vf */
5123 for (int i = 0; i < adapter->num_vfs; i++) {
5124 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5125 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5126 IXGBE_QDE_ENABLE));
5127 }
5128 } /* ixgbe_enable_rx_drop */
5129
5130 /************************************************************************
5131 * ixgbe_disable_rx_drop
5132 ************************************************************************/
5133 static void
5134 ixgbe_disable_rx_drop(struct adapter *adapter)
5135 {
5136 struct ixgbe_hw *hw = &adapter->hw;
5137 struct rx_ring *rxr;
5138 u32 srrctl;
5139
5140 for (int i = 0; i < adapter->num_queues; i++) {
5141 rxr = &adapter->rx_rings[i];
5142 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5143 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5144 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5145 }
5146
5147 /* disable drop for each vf */
5148 for (int i = 0; i < adapter->num_vfs; i++) {
5149 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5150 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5151 }
5152 } /* ixgbe_disable_rx_drop */
5153
5154 /************************************************************************
5155 * ixgbe_sysctl_advertise
5156 *
5157 * SYSCTL wrapper around setting advertised speed
5158 ************************************************************************/
5159 static int
5160 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5161 {
5162 struct sysctlnode node = *rnode;
5163 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5164 int error = 0, advertise;
5165
5166 advertise = adapter->advertise;
5167 node.sysctl_data = &advertise;
5168 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5169 if (error != 0 || newp == NULL)
5170 return error;
5171
5172 return ixgbe_set_advertise(adapter, advertise);
5173 } /* ixgbe_sysctl_advertise */
5174
5175 /************************************************************************
5176 * ixgbe_set_advertise - Control advertised link speed
5177 *
5178 * Flags:
5179 * 0x00 - Default (all capable link speed)
5180 * 0x01 - advertise 100 Mb
5181 * 0x02 - advertise 1G
5182 * 0x04 - advertise 10G
5183 * 0x08 - advertise 10 Mb
5184 * 0x10 - advertise 2.5G
5185 * 0x20 - advertise 5G
5186 ************************************************************************/
5187 static int
5188 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5189 {
5190 device_t dev;
5191 struct ixgbe_hw *hw;
5192 ixgbe_link_speed speed = 0;
5193 ixgbe_link_speed link_caps = 0;
5194 s32 err = IXGBE_NOT_IMPLEMENTED;
5195 bool negotiate = FALSE;
5196
5197 /* Checks to validate new value */
5198 if (adapter->advertise == advertise) /* no change */
5199 return (0);
5200
5201 dev = adapter->dev;
5202 hw = &adapter->hw;
5203
5204 /* No speed changes for backplane media */
5205 if (hw->phy.media_type == ixgbe_media_type_backplane)
5206 return (ENODEV);
5207
5208 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5209 (hw->phy.multispeed_fiber))) {
5210 device_printf(dev,
5211 "Advertised speed can only be set on copper or "
5212 "multispeed fiber media types.\n");
5213 return (EINVAL);
5214 }
5215
5216 if (advertise < 0x0 || advertise > 0x2f) {
5217 device_printf(dev,
5218 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5219 return (EINVAL);
5220 }
5221
5222 if (hw->mac.ops.get_link_capabilities) {
5223 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5224 &negotiate);
5225 if (err != IXGBE_SUCCESS) {
5226 device_printf(dev, "Unable to determine supported advertise speeds\n");
5227 return (ENODEV);
5228 }
5229 }
5230
5231 /* Set new value and report new advertised mode */
5232 if (advertise & 0x1) {
5233 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5234 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5235 return (EINVAL);
5236 }
5237 speed |= IXGBE_LINK_SPEED_100_FULL;
5238 }
5239 if (advertise & 0x2) {
5240 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5241 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5242 return (EINVAL);
5243 }
5244 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5245 }
5246 if (advertise & 0x4) {
5247 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5248 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5249 return (EINVAL);
5250 }
5251 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5252 }
5253 if (advertise & 0x8) {
5254 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5255 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5256 return (EINVAL);
5257 }
5258 speed |= IXGBE_LINK_SPEED_10_FULL;
5259 }
5260 if (advertise & 0x10) {
5261 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5262 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5263 return (EINVAL);
5264 }
5265 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5266 }
5267 if (advertise & 0x20) {
5268 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5269 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5270 return (EINVAL);
5271 }
5272 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5273 }
5274 if (advertise == 0)
5275 speed = link_caps; /* All capable link speed */
5276
5277 hw->mac.autotry_restart = TRUE;
5278 hw->mac.ops.setup_link(hw, speed, TRUE);
5279 adapter->advertise = advertise;
5280
5281 return (0);
5282 } /* ixgbe_set_advertise */
5283
5284 /************************************************************************
5285 * ixgbe_get_advertise - Get current advertised speed settings
5286 *
5287 * Formatted for sysctl usage.
5288 * Flags:
5289 * 0x01 - advertise 100 Mb
5290 * 0x02 - advertise 1G
5291 * 0x04 - advertise 10G
5292 * 0x08 - advertise 10 Mb (yes, Mb)
5293 * 0x10 - advertise 2.5G
5294 * 0x20 - advertise 5G
5295 ************************************************************************/
5296 static int
5297 ixgbe_get_advertise(struct adapter *adapter)
5298 {
5299 struct ixgbe_hw *hw = &adapter->hw;
5300 int speed;
5301 ixgbe_link_speed link_caps = 0;
5302 s32 err;
5303 bool negotiate = FALSE;
5304
5305 /*
5306 * Advertised speed means nothing unless it's copper or
5307 * multi-speed fiber
5308 */
5309 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5310 !(hw->phy.multispeed_fiber))
5311 return (0);
5312
5313 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5314 if (err != IXGBE_SUCCESS)
5315 return (0);
5316
5317 speed =
5318 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5319 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5320 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5321 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5322 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5323 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5324
5325 return speed;
5326 } /* ixgbe_get_advertise */
5327
5328 /************************************************************************
5329 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5330 *
5331 * Control values:
5332 * 0/1 - off / on (use default value of 1000)
5333 *
5334 * Legal timer values are:
5335 * 50,100,250,500,1000,2000,5000,10000
5336 *
5337 * Turning off interrupt moderation will also turn this off.
5338 ************************************************************************/
5339 static int
5340 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5341 {
5342 struct sysctlnode node = *rnode;
5343 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5344 struct ifnet *ifp = adapter->ifp;
5345 int error;
5346 int newval;
5347
5348 newval = adapter->dmac;
5349 node.sysctl_data = &newval;
5350 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5351 if ((error) || (newp == NULL))
5352 return (error);
5353
5354 switch (newval) {
5355 case 0:
5356 /* Disabled */
5357 adapter->dmac = 0;
5358 break;
5359 case 1:
5360 /* Enable and use default */
5361 adapter->dmac = 1000;
5362 break;
5363 case 50:
5364 case 100:
5365 case 250:
5366 case 500:
5367 case 1000:
5368 case 2000:
5369 case 5000:
5370 case 10000:
5371 /* Legal values - allow */
5372 adapter->dmac = newval;
5373 break;
5374 default:
5375 /* Do nothing, illegal value */
5376 return (EINVAL);
5377 }
5378
5379 /* Re-initialize hardware if it's already running */
5380 if (ifp->if_flags & IFF_RUNNING)
5381 ifp->if_init(ifp);
5382
5383 return (0);
5384 }
5385
5386 #ifdef IXGBE_DEBUG
5387 /************************************************************************
5388 * ixgbe_sysctl_power_state
5389 *
5390 * Sysctl to test power states
5391 * Values:
5392 * 0 - set device to D0
5393 * 3 - set device to D3
5394 * (none) - get current device power state
5395 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/*
 * The whole body is compiled out: the code below is a FreeBSD-style
 * implementation (note 'req->newp' and DEVICE_SUSPEND/DEVICE_RESUME)
 * kept for reference until a NetBSD port is written.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* NOTE(review): 'req->newp' is FreeBSD sysctl API; NetBSD uses 'newp'. */
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Not implemented on NetBSD yet; report success and do nothing. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
5428 #endif
5429
5430 /************************************************************************
5431 * ixgbe_sysctl_wol_enable
5432 *
5433 * Sysctl to enable/disable the WoL capability,
5434 * if supported by the adapter.
5435 *
5436 * Values:
5437 * 0 - disabled
5438 * 1 - enabled
5439 ************************************************************************/
5440 static int
5441 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5442 {
5443 struct sysctlnode node = *rnode;
5444 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5445 struct ixgbe_hw *hw = &adapter->hw;
5446 bool new_wol_enabled;
5447 int error = 0;
5448
5449 new_wol_enabled = hw->wol_enabled;
5450 node.sysctl_data = &new_wol_enabled;
5451 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5452 if ((error) || (newp == NULL))
5453 return (error);
5454 if (new_wol_enabled == hw->wol_enabled)
5455 return (0);
5456
5457 if (new_wol_enabled && !adapter->wol_support)
5458 return (ENODEV);
5459 else
5460 hw->wol_enabled = new_wol_enabled;
5461
5462 return (0);
5463 } /* ixgbe_sysctl_wol_enable */
5464
5465 /************************************************************************
5466 * ixgbe_sysctl_wufc - Wake Up Filter Control
5467 *
5468 * Sysctl to enable/disable the types of packets that the
5469 * adapter will wake up on upon receipt.
5470 * Flags:
5471 * 0x1 - Link Status Change
5472 * 0x2 - Magic Packet
5473 * 0x4 - Direct Exact
5474 * 0x8 - Directed Multicast
5475 * 0x10 - Broadcast
5476 * 0x20 - ARP/IPv4 Request Packet
5477 * 0x40 - Direct IPv4 Packet
5478 * 0x80 - Direct IPv6 Packet
5479 *
5480 * Settings not listed above will cause the sysctl to return an error.
5481 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the 8 documented filter bits may be written. */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): this merge keeps bits 8-23 of the old value, but
	 * because 0xffffff also covers bits 0-7 it ORs the OLD low byte
	 * back in, so filter bits can be set but never cleared via this
	 * sysctl.  Looks suspicious — verify intent before changing.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5507
5508 #ifdef IXGBE_DEBUG
5509 /************************************************************************
5510 * ixgbe_sysctl_print_rss_config
5511 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
/*
 * Entire body compiled out: this is the FreeBSD sbuf-based
 * implementation kept for reference until a NetBSD version exists.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* Entries 0-31 live in RETA, 32+ in the extended ERETA. */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
5565 #endif /* IXGBE_DEBUG */
5566
5567 /************************************************************************
5568 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5569 *
5570 * For X552/X557-AT devices using an external PHY
5571 ************************************************************************/
5572 static int
5573 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5574 {
5575 struct sysctlnode node = *rnode;
5576 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5577 struct ixgbe_hw *hw = &adapter->hw;
5578 int val;
5579 u16 reg;
5580 int error;
5581
5582 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5583 device_printf(adapter->dev,
5584 "Device has no supported external thermal sensor.\n");
5585 return (ENODEV);
5586 }
5587
5588 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5589 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5590 device_printf(adapter->dev,
5591 "Error reading from PHY's current temperature register\n");
5592 return (EAGAIN);
5593 }
5594
5595 node.sysctl_data = &val;
5596
5597 /* Shift temp for output */
5598 val = reg >> 8;
5599
5600 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5601 if ((error) || (newp == NULL))
5602 return (error);
5603
5604 return (0);
5605 } /* ixgbe_sysctl_phy_temp */
5606
5607 /************************************************************************
5608 * ixgbe_sysctl_phy_overtemp_occurred
5609 *
5610 * Reports (directly from the PHY) whether the current PHY
5611 * temperature is over the overtemp threshold.
5612 ************************************************************************/
5613 static int
5614 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5615 {
5616 struct sysctlnode node = *rnode;
5617 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5618 struct ixgbe_hw *hw = &adapter->hw;
5619 int val, error;
5620 u16 reg;
5621
5622 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5623 device_printf(adapter->dev,
5624 "Device has no supported external thermal sensor.\n");
5625 return (ENODEV);
5626 }
5627
5628 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5629 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5630 device_printf(adapter->dev,
5631 "Error reading from PHY's temperature status register\n");
5632 return (EAGAIN);
5633 }
5634
5635 node.sysctl_data = &val;
5636
5637 /* Get occurrence bit */
5638 val = !!(reg & 0x4000);
5639
5640 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5641 if ((error) || (newp == NULL))
5642 return (error);
5643
5644 return (0);
5645 } /* ixgbe_sysctl_phy_overtemp_occurred */
5646
5647 /************************************************************************
5648 * ixgbe_sysctl_eee_state
5649 *
5650 * Sysctl to set EEE power saving feature
5651 * Values:
5652 * 0 - disable EEE
5653 * 1 - enable EEE
5654 * (none) - get current device EEE state
5655 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* Seed the node with the currently-enabled state (0 or 1). */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access (newp == NULL) stops here with the current state. */
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* Program the MAC; non-zero return means the hardware refused. */
	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
5703
/*
 * PRINTQS(adapter, regname): print one per-queue register for every
 * queue on a single line, prefixed with the device name.  "regname"
 * is token-pasted onto IXGBE_ to form the register accessor macro
 * (e.g. RDH -> IXGBE_RDH(i)).  Debug aid used by
 * ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname) \
	do { \
		struct ixgbe_hw	*_hw = &(adapter)->hw; \
		int _i; \
 \
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) { \
			printf((_i == 0) ? "\t" : " "); \
			printf("%08x", IXGBE_READ_REG(_hw, \
				IXGBE_##regname(_i))); \
		} \
		printf("\n"); \
	} while (0)
5717
5718 /************************************************************************
5719 * ixgbe_print_debug_info
5720 *
5721 * Called only when em_display_debug_stats is enabled.
5722 * Provides a way to take a look at important statistics
5723 * maintained by the driver and hardware.
5724 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/*
	 * X550-family parts have a 128-entry RSS redirection table
	 * (32 RETA + 96 ERETA registers); older MACs have only 32.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the redirection table: RETA for 0-31, ERETA beyond. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_ERETA(i - 32)));
	}

	/* Per-queue descriptor ring state, one register per row. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* One RQSMR register covers four queues, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software-side count of how often each queue was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; EIMS_EX only exists on post-82598 MACs. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
5790
5791 /************************************************************************
5792 * ixgbe_sysctl_debug
5793 ************************************************************************/
5794 static int
5795 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5796 {
5797 struct sysctlnode node = *rnode;
5798 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5799 int error, result = 0;
5800
5801 node.sysctl_data = &result;
5802 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5803
5804 if (error || newp == NULL)
5805 return error;
5806
5807 if (result == 1)
5808 ixgbe_print_debug_info(adapter);
5809
5810 return 0;
5811 } /* ixgbe_sysctl_debug */
5812
5813 /************************************************************************
5814 * ixgbe_init_device_features
5815 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Baseline capabilities common to all supported MACs. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
			  | IXGBE_FEATURE_RSS
			  | IXGBE_FEATURE_MSI
			  | IXGBE_FEATURE_MSIX
			  | IXGBE_FEATURE_LEGACY_IRQ
			  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only available on function 0 of bypass SKUs. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Only the KR backplane variant supports EEE here. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM-a cannot use INTx at all. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* The QSFP quad-port SKU cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		/* RSS and SR-IOV both require per-queue MSI-X vectors. */
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
5918
5919 /************************************************************************
5920 * ixgbe_probe - Device identification routine
5921 *
5922 * Determines if the driver should be loaded on
5923 * adapter based on its PCI vendor/device ID.
5924 *
5925 * return BUS_PROBE_DEFAULT on success, positive on failure
5926 ************************************************************************/
5927 static int
5928 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5929 {
5930 const struct pci_attach_args *pa = aux;
5931
5932 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5933 }
5934
5935 static const ixgbe_vendor_info_t *
5936 ixgbe_lookup(const struct pci_attach_args *pa)
5937 {
5938 const ixgbe_vendor_info_t *ent;
5939 pcireg_t subid;
5940
5941 INIT_DEBUGOUT("ixgbe_lookup: begin");
5942
5943 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5944 return NULL;
5945
5946 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5947
5948 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5949 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5950 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5951 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5952 (ent->subvendor_id == 0)) &&
5953 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5954 (ent->subdevice_id == 0))) {
5955 return ent;
5956 }
5957 }
5958 return NULL;
5959 }
5960
/*
 * Ethernet-layer callback invoked when interface flags change.
 * Returns ENETRESET when a flag changed that requires a full
 * reinitialization; handles promiscuous/allmulti changes directly.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change, rc = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Remember the new flags; "change" holds the bits that flipped. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/* Anything beyond promisc/allmulti/debug needs a reset. */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		rc = ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		ixgbe_set_promisc(adapter);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return rc;
}
5986
5987 /************************************************************************
5988 * ixgbe_ioctl - Ioctl entry point
5989 *
5990 * Called when the user wants to configure the interface.
5991 *
5992 * return 0 on success, positive on failure
5993 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The four L4 Rx checksum capabilities that must toggle together. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/*
	 * First switch: debug tracing only — except SIOCZIFDATA, which
	 * also clears the hardware and software counters here.  The
	 * real dispatch happens in the second switch below.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media requests go straight to the ifmedia framework. */
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Let ether_ioctl() do the generic work; ENETRESET means
		 * we must reprogram the hardware for the new settings.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6128
6129 /************************************************************************
6130 * ixgbe_check_fan_failure
6131 ************************************************************************/
6132 static void
6133 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6134 {
6135 u32 mask;
6136
6137 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6138 IXGBE_ESDP_SDP1;
6139
6140 if (reg & mask)
6141 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6142 } /* ixgbe_check_fan_failure */
6143
6144 /************************************************************************
6145 * ixgbe_handle_que
6146 ************************************************************************/
/*
 * Deferred (softint/workqueue) per-queue service routine: drain RX and
 * TX completions, push any backlogged transmit work, then either
 * reschedule itself (more work pending) or re-enable the interrupt.
 */
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue TX: flush this ring's pktqueue backlog. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: defer again rather than spin here. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No dedicated MSI-X vector: re-enable everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6184
6185 /************************************************************************
6186 * ixgbe_handle_que_work
6187 ************************************************************************/
/*
 * workqueue(9) adapter: recover the ix_queue from the embedded work
 * cookie and run the common deferred handler.
 */
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6199
6200 /************************************************************************
6201 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6202 ************************************************************************/
6203 static int
6204 ixgbe_allocate_legacy(struct adapter *adapter,
6205 const struct pci_attach_args *pa)
6206 {
6207 device_t dev = adapter->dev;
6208 struct ix_queue *que = adapter->queues;
6209 struct tx_ring *txr = adapter->tx_rings;
6210 int counts[PCI_INTR_TYPE_SIZE];
6211 pci_intr_type_t intr_type, max_type;
6212 char intrbuf[PCI_INTRSTR_LEN];
6213 const char *intrstr = NULL;
6214
6215 /* We allocate a single interrupt resource */
6216 max_type = PCI_INTR_TYPE_MSI;
6217 counts[PCI_INTR_TYPE_MSIX] = 0;
6218 counts[PCI_INTR_TYPE_MSI] =
6219 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6220 /* Check not feat_en but feat_cap to fallback to INTx */
6221 counts[PCI_INTR_TYPE_INTX] =
6222 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6223
6224 alloc_retry:
6225 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6226 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6227 return ENXIO;
6228 }
6229 adapter->osdep.nintrs = 1;
6230 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6231 intrbuf, sizeof(intrbuf));
6232 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6233 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6234 device_xname(dev));
6235 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6236 if (adapter->osdep.ihs[0] == NULL) {
6237 aprint_error_dev(dev,"unable to establish %s\n",
6238 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6239 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6240 adapter->osdep.intrs = NULL;
6241 switch (intr_type) {
6242 case PCI_INTR_TYPE_MSI:
6243 /* The next try is for INTx: Disable MSI */
6244 max_type = PCI_INTR_TYPE_INTX;
6245 counts[PCI_INTR_TYPE_INTX] = 1;
6246 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6247 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6248 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6249 goto alloc_retry;
6250 } else
6251 break;
6252 case PCI_INTR_TYPE_INTX:
6253 default:
6254 /* See below */
6255 break;
6256 }
6257 }
6258 if (intr_type == PCI_INTR_TYPE_INTX) {
6259 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6260 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6261 }
6262 if (adapter->osdep.ihs[0] == NULL) {
6263 aprint_error_dev(dev,
6264 "couldn't establish interrupt%s%s\n",
6265 intrstr ? " at " : "", intrstr ? intrstr : "");
6266 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6267 adapter->osdep.intrs = NULL;
6268 return ENXIO;
6269 }
6270 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6271 /*
6272 * Try allocating a fast interrupt and the associated deferred
6273 * processing contexts.
6274 */
6275 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6276 txr->txr_si =
6277 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6278 ixgbe_deferred_mq_start, txr);
6279 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6280 ixgbe_handle_que, que);
6281
6282 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6283 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6284 aprint_error_dev(dev,
6285 "could not establish software interrupts\n");
6286
6287 return ENXIO;
6288 }
6289 /* For simplicity in the handlers */
6290 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6291
6292 return (0);
6293 } /* ixgbe_allocate_legacy */
6294
6295 /************************************************************************
6296 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6297 ************************************************************************/
6298 static int
6299 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6300 {
6301 device_t dev = adapter->dev;
6302 struct ix_queue *que = adapter->queues;
6303 struct tx_ring *txr = adapter->tx_rings;
6304 pci_chipset_tag_t pc;
6305 char intrbuf[PCI_INTRSTR_LEN];
6306 char intr_xname[32];
6307 char wqname[MAXCOMLEN];
6308 const char *intrstr = NULL;
6309 int error, vector = 0;
6310 int cpu_id = 0;
6311 kcpuset_t *affinity;
6312 #ifdef RSS
6313 unsigned int rss_buckets = 0;
6314 kcpuset_t cpu_mask;
6315 #endif
6316
6317 pc = adapter->osdep.pc;
6318 #ifdef RSS
6319 /*
6320 * If we're doing RSS, the number of queues needs to
6321 * match the number of RSS buckets that are configured.
6322 *
6323 * + If there's more queues than RSS buckets, we'll end
6324 * up with queues that get no traffic.
6325 *
6326 * + If there's more RSS buckets than queues, we'll end
6327 * up having multiple RSS buckets map to the same queue,
6328 * so there'll be some contention.
6329 */
6330 rss_buckets = rss_getnumbuckets();
6331 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6332 (adapter->num_queues != rss_buckets)) {
6333 device_printf(dev,
6334 "%s: number of queues (%d) != number of RSS buckets (%d)"
6335 "; performance will be impacted.\n",
6336 __func__, adapter->num_queues, rss_buckets);
6337 }
6338 #endif
6339
6340 adapter->osdep.nintrs = adapter->num_queues + 1;
6341 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6342 adapter->osdep.nintrs) != 0) {
6343 aprint_error_dev(dev,
6344 "failed to allocate MSI-X interrupt\n");
6345 return (ENXIO);
6346 }
6347
6348 kcpuset_create(&affinity, false);
6349 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6350 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6351 device_xname(dev), i);
6352 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6353 sizeof(intrbuf));
6354 #ifdef IXGBE_MPSAFE
6355 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6356 true);
6357 #endif
6358 /* Set the handler function */
6359 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6360 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6361 intr_xname);
6362 if (que->res == NULL) {
6363 aprint_error_dev(dev,
6364 "Failed to register QUE handler\n");
6365 error = ENXIO;
6366 goto err_out;
6367 }
6368 que->msix = vector;
6369 adapter->active_queues |= (u64)(1 << que->msix);
6370
6371 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6372 #ifdef RSS
6373 /*
6374 * The queue ID is used as the RSS layer bucket ID.
6375 * We look up the queue ID -> RSS CPU ID and select
6376 * that.
6377 */
6378 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6379 CPU_SETOF(cpu_id, &cpu_mask);
6380 #endif
6381 } else {
6382 /*
6383 * Bind the MSI-X vector, and thus the
6384 * rings to the corresponding CPU.
6385 *
6386 * This just happens to match the default RSS
6387 * round-robin bucket -> queue -> CPU allocation.
6388 */
6389 if (adapter->num_queues > 1)
6390 cpu_id = i;
6391 }
6392 /* Round-robin affinity */
6393 kcpuset_zero(affinity);
6394 kcpuset_set(affinity, cpu_id % ncpu);
6395 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6396 NULL);
6397 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6398 intrstr);
6399 if (error == 0) {
6400 #if 1 /* def IXGBE_DEBUG */
6401 #ifdef RSS
6402 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6403 cpu_id % ncpu);
6404 #else
6405 aprint_normal(", bound queue %d to cpu %d", i,
6406 cpu_id % ncpu);
6407 #endif
6408 #endif /* IXGBE_DEBUG */
6409 }
6410 aprint_normal("\n");
6411
6412 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6413 txr->txr_si = softint_establish(
6414 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6415 ixgbe_deferred_mq_start, txr);
6416 if (txr->txr_si == NULL) {
6417 aprint_error_dev(dev,
6418 "couldn't establish software interrupt\n");
6419 error = ENXIO;
6420 goto err_out;
6421 }
6422 }
6423 que->que_si
6424 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6425 ixgbe_handle_que, que);
6426 if (que->que_si == NULL) {
6427 aprint_error_dev(dev,
6428 "couldn't establish software interrupt\n");
6429 error = ENXIO;
6430 goto err_out;
6431 }
6432 }
6433 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6434 error = workqueue_create(&adapter->txr_wq, wqname,
6435 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6436 IXGBE_WORKQUEUE_FLAGS);
6437 if (error) {
6438 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6439 goto err_out;
6440 }
6441 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6442
6443 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6444 error = workqueue_create(&adapter->que_wq, wqname,
6445 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6446 IXGBE_WORKQUEUE_FLAGS);
6447 if (error) {
6448 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6449 goto err_out;
6450 }
6451
6452 /* and Link */
6453 cpu_id++;
6454 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6455 adapter->vector = vector;
6456 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6457 sizeof(intrbuf));
6458 #ifdef IXGBE_MPSAFE
6459 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6460 true);
6461 #endif
6462 /* Set the link handler function */
6463 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6464 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6465 intr_xname);
6466 if (adapter->osdep.ihs[vector] == NULL) {
6467 aprint_error_dev(dev, "Failed to register LINK handler\n");
6468 error = ENXIO;
6469 goto err_out;
6470 }
6471 /* Round-robin affinity */
6472 kcpuset_zero(affinity);
6473 kcpuset_set(affinity, cpu_id % ncpu);
6474 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6475 NULL);
6476
6477 aprint_normal_dev(dev,
6478 "for link, interrupting at %s", intrstr);
6479 if (error == 0)
6480 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6481 else
6482 aprint_normal("\n");
6483
6484 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6485 adapter->mbx_si =
6486 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6487 ixgbe_handle_mbx, adapter);
6488 if (adapter->mbx_si == NULL) {
6489 aprint_error_dev(dev,
6490 "could not establish software interrupts\n");
6491
6492 error = ENXIO;
6493 goto err_out;
6494 }
6495 }
6496
6497 kcpuset_destroy(affinity);
6498 aprint_normal_dev(dev,
6499 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6500
6501 return (0);
6502
6503 err_out:
6504 kcpuset_destroy(affinity);
6505 ixgbe_free_softint(adapter);
6506 ixgbe_free_pciintr_resources(adapter);
6507 return (error);
6508 } /* ixgbe_allocate_msix */
6509
6510 /************************************************************************
6511 * ixgbe_configure_interrupts
6512 *
6513 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6514 * This will also depend on user settings.
6515 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_mac_info *mac = &adapter->hw.mac;
	int want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/*
	 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
	 * interrupt slot.
	 */
	if (ncpu == 1)
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	msgs = MIN(msgs, IXG_MAX_NINTR);
	/* Need at least one queue vector plus the link vector. */
	if (msgs < 2)
		goto msi;

	adapter->msix_mem = (void *)1; /* XXX */

	/* Figure out a reasonable auto config value */
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = uimin(queues, rss_getnumbuckets());
#endif
	/* The user's tuneable cannot exceed what the hardware offers. */
	if (ixgbe_num_queues > queues) {
		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	else
		/* Auto mode: also bounded by the MAC's TX/RX queue limits. */
		queues = uimin(queues,
		    uimin(mac->max_tx_queues, mac->max_rx_queues));

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev, "MSI-X Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	adapter->num_queues = queues;
	adapter->feat_en |= IXGBE_FEATURE_MSIX;
	return (0);

	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;

	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
	adapter->msix_mem = NULL; /* XXX */
	/* NOTE(review): this clamp is redundant — the branch below
	 * assigns msgs = 1 again for any non-zero count. */
	if (msgs > 1)
		msgs = 1;
	if (msgs != 0) {
		msgs = 1;
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		return (0);
	}

	/* Neither MSI-X nor MSI: INTx is the last resort, if allowed. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		aprint_error_dev(dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;

	return (0);
} /* ixgbe_configure_interrupts */
6616
6617
6618 /************************************************************************
6619 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6620 *
6621 * Done outside of interrupt context since the driver might sleep
6622 ************************************************************************/
static void
ixgbe_handle_link(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Core lock held: link check may touch shared adapter state. */
	IXGBE_CORE_LOCK(adapter);
	++adapter->link_sicount.ev_count;
	/* Final arg 0: do not wait for link to come up. */
	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
	ixgbe_update_link_status(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_link */
6639
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 * Software-triggers interrupts for the queues in the given bitmask by
 * writing EICS (82598) or the split EICS_EX pair (later MACs).
 * Currently unreferenced in the NetBSD port, hence compiled out.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* 64 queue bits split across two 32-bit registers. */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
6669