ixgbe.c revision 1.166 1 /* $NetBSD: ixgbe.c,v 1.166 2018/09/14 09:51:09 msaitoh Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "ixgbe_sriov.h"
74 #include "vlan.h"
75
76 #include <sys/cprng.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 /************************************************************************
81 * Driver version
82 ************************************************************************/
83 static const char ixgbe_driver_version[] = "4.0.1-k";
84
85
86 /************************************************************************
87 * PCI Device ID Table
88 *
89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s
92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/
95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96 {
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
141 /* required last entry */
142 {0, 0, 0, 0, 0}
143 };
144
145 /************************************************************************
146 * Table of branding strings
147 ************************************************************************/
148 static const char *ixgbe_strings[] = {
149 "Intel(R) PRO/10GbE PCI-Express Network Driver"
150 };
151
152 /************************************************************************
153 * Function prototypes
154 ************************************************************************/
155 static int ixgbe_probe(device_t, cfdata_t, void *);
156 static void ixgbe_attach(device_t, device_t, void *);
157 static int ixgbe_detach(device_t, int);
158 #if 0
159 static int ixgbe_shutdown(device_t);
160 #endif
161 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
162 static bool ixgbe_resume(device_t, const pmf_qual_t *);
163 static int ixgbe_ifflags_cb(struct ethercom *);
164 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
165 static void ixgbe_ifstop(struct ifnet *, int);
166 static int ixgbe_init(struct ifnet *);
167 static void ixgbe_init_locked(struct adapter *);
168 static void ixgbe_stop(void *);
169 static void ixgbe_init_device_features(struct adapter *);
170 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
171 static void ixgbe_add_media_types(struct adapter *);
172 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
173 static int ixgbe_media_change(struct ifnet *);
174 static int ixgbe_allocate_pci_resources(struct adapter *,
175 const struct pci_attach_args *);
176 static void ixgbe_free_softint(struct adapter *);
177 static void ixgbe_get_slot_info(struct adapter *);
178 static int ixgbe_allocate_msix(struct adapter *,
179 const struct pci_attach_args *);
180 static int ixgbe_allocate_legacy(struct adapter *,
181 const struct pci_attach_args *);
182 static int ixgbe_configure_interrupts(struct adapter *);
183 static void ixgbe_free_pciintr_resources(struct adapter *);
184 static void ixgbe_free_pci_resources(struct adapter *);
185 static void ixgbe_local_timer(void *);
186 static void ixgbe_local_timer1(void *);
187 static int ixgbe_setup_interface(device_t, struct adapter *);
188 static void ixgbe_config_gpie(struct adapter *);
189 static void ixgbe_config_dmac(struct adapter *);
190 static void ixgbe_config_delay_values(struct adapter *);
191 static void ixgbe_config_link(struct adapter *);
192 static void ixgbe_check_wol_support(struct adapter *);
193 static int ixgbe_setup_low_power_mode(struct adapter *);
194 #if 0
195 static void ixgbe_rearm_queues(struct adapter *, u64);
196 #endif
197
198 static void ixgbe_initialize_transmit_units(struct adapter *);
199 static void ixgbe_initialize_receive_units(struct adapter *);
200 static void ixgbe_enable_rx_drop(struct adapter *);
201 static void ixgbe_disable_rx_drop(struct adapter *);
202 static void ixgbe_initialize_rss_mapping(struct adapter *);
203
204 static void ixgbe_enable_intr(struct adapter *);
205 static void ixgbe_disable_intr(struct adapter *);
206 static void ixgbe_update_stats_counters(struct adapter *);
207 static void ixgbe_set_promisc(struct adapter *);
208 static void ixgbe_set_multi(struct adapter *);
209 static void ixgbe_update_link_status(struct adapter *);
210 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
211 static void ixgbe_configure_ivars(struct adapter *);
212 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
213 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
214
215 static void ixgbe_setup_vlan_hw_support(struct adapter *);
216 #if 0
217 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
218 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
219 #endif
220
221 static void ixgbe_add_device_sysctls(struct adapter *);
222 static void ixgbe_add_hw_stats(struct adapter *);
223 static void ixgbe_clear_evcnt(struct adapter *);
224 static int ixgbe_set_flowcntl(struct adapter *, int);
225 static int ixgbe_set_advertise(struct adapter *, int);
226 static int ixgbe_get_advertise(struct adapter *);
227
228 /* Sysctl handlers */
229 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
230 const char *, int *, int);
231 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
234 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
235 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
237 #ifdef IXGBE_DEBUG
238 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
239 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
240 #endif
241 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
245 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
246 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
247 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
248 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
249 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
250
251 /* Support for pluggable optic modules */
252 static bool ixgbe_sfp_probe(struct adapter *);
253
254 /* Legacy (single vector) interrupt handler */
255 static int ixgbe_legacy_irq(void *);
256
257 /* The MSI/MSI-X Interrupt handlers */
258 static int ixgbe_msix_que(void *);
259 static int ixgbe_msix_link(void *);
260
261 /* Software interrupts for deferred work */
262 static void ixgbe_handle_que(void *);
263 static void ixgbe_handle_link(void *);
264 static void ixgbe_handle_msf(void *);
265 static void ixgbe_handle_mod(void *);
266 static void ixgbe_handle_phy(void *);
267
268 /* Workqueue handler for deferred work */
269 static void ixgbe_handle_que_work(struct work *, void *);
270
271 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
272
273 /************************************************************************
274 * NetBSD Device Interface Entry Points
275 ************************************************************************/
276 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
277 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
278 DVF_DETACH_SHUTDOWN);
279
280 #if 0
281 devclass_t ix_devclass;
282 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
283
284 MODULE_DEPEND(ix, pci, 1, 1, 1);
285 MODULE_DEPEND(ix, ether, 1, 1, 1);
286 #ifdef DEV_NETMAP
287 MODULE_DEPEND(ix, netmap, 1, 1, 1);
288 #endif
289 #endif
290
291 /*
292 * TUNEABLE PARAMETERS:
293 */
294
295 /*
296 * AIM: Adaptive Interrupt Moderation
297 * which means that the interrupt rate
298 * is varied over time based on the
299 * traffic for that interrupt vector
300 */
301 static bool ixgbe_enable_aim = true;
302 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
303 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
304 "Enable adaptive interrupt moderation");
305
306 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
307 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
308 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
309
310 /* How many packets rxeof tries to clean at a time */
311 static int ixgbe_rx_process_limit = 256;
312 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
313 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
314
315 /* How many packets txeof tries to clean at a time */
316 static int ixgbe_tx_process_limit = 256;
317 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
318 &ixgbe_tx_process_limit, 0,
319 "Maximum number of sent packets to process at a time, -1 means unlimited");
320
321 /* Flow control setting, default to full */
322 static int ixgbe_flow_control = ixgbe_fc_full;
323 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
324 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
325
326 /* Whether packet processing uses workqueue or softint */
327 static bool ixgbe_txrx_workqueue = false;
328
329 /*
330 * Smart speed setting, default to on
331 * this only works as a compile option
332 * right now as its during attach, set
333 * this to 'ixgbe_smart_speed_off' to
334 * disable.
335 */
336 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
337
338 /*
339 * MSI-X should be the default for best performance,
340 * but this allows it to be forced off for testing.
341 */
342 static int ixgbe_enable_msix = 1;
343 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
344 "Enable MSI-X interrupts");
345
346 /*
347 * Number of Queues, can be set to 0,
348 * it then autoconfigures based on the
349 * number of cpus with a max of 8. This
350 * can be overridden manually here.
351 */
352 static int ixgbe_num_queues = 0;
353 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
354 "Number of queues to configure, 0 indicates autoconfigure");
355
356 /*
357 * Number of TX descriptors per ring,
358 * setting higher than RX as this seems
359 * the better performing choice.
360 */
361 static int ixgbe_txd = PERFORM_TXD;
362 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
363 "Number of transmit descriptors per queue");
364
365 /* Number of RX descriptors per ring */
366 static int ixgbe_rxd = PERFORM_RXD;
367 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
368 "Number of receive descriptors per queue");
369
370 /*
371 * Defining this on will allow the use
372 * of unsupported SFP+ modules, note that
373 * doing so you are on your own :)
374 */
375 static int allow_unsupported_sfp = false;
376 #define TUNABLE_INT(__x, __y)
377 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
378
379 /*
380 * Not sure if Flow Director is fully baked,
381 * so we'll default to turning it off.
382 */
383 static int ixgbe_enable_fdir = 0;
384 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
385 "Enable Flow Director");
386
387 /* Legacy Transmit (single queue) */
388 static int ixgbe_enable_legacy_tx = 0;
389 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
390 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
391
392 /* Receive-Side Scaling */
393 static int ixgbe_enable_rss = 1;
394 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
395 "Enable Receive-Side Scaling (RSS)");
396
397 #if 0
398 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
399 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
400 #endif
401
402 #ifdef NET_MPSAFE
403 #define IXGBE_MPSAFE 1
404 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
405 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
406 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
407 #else
408 #define IXGBE_CALLOUT_FLAGS 0
409 #define IXGBE_SOFTINFT_FLAGS 0
410 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
411 #endif
412 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
413
414 /************************************************************************
415 * ixgbe_initialize_rss_mapping
416 ************************************************************************/
417 static void
418 ixgbe_initialize_rss_mapping(struct adapter *adapter)
419 {
420 struct ixgbe_hw *hw = &adapter->hw;
421 u32 reta = 0, mrqc, rss_key[10];
422 int queue_id, table_size, index_mult;
423 int i, j;
424 u32 rss_hash_config;
425
426 /* force use default RSS key. */
427 #ifdef __NetBSD__
428 rss_getkey((uint8_t *) &rss_key);
429 #else
430 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
431 /* Fetch the configured RSS key */
432 rss_getkey((uint8_t *) &rss_key);
433 } else {
434 /* set up random bits */
435 cprng_fast(&rss_key, sizeof(rss_key));
436 }
437 #endif
438
439 /* Set multiplier for RETA setup and table size based on MAC */
440 index_mult = 0x1;
441 table_size = 128;
442 switch (adapter->hw.mac.type) {
443 case ixgbe_mac_82598EB:
444 index_mult = 0x11;
445 break;
446 case ixgbe_mac_X550:
447 case ixgbe_mac_X550EM_x:
448 case ixgbe_mac_X550EM_a:
449 table_size = 512;
450 break;
451 default:
452 break;
453 }
454
455 /* Set up the redirection table */
456 for (i = 0, j = 0; i < table_size; i++, j++) {
457 if (j == adapter->num_queues)
458 j = 0;
459
460 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
461 /*
462 * Fetch the RSS bucket id for the given indirection
463 * entry. Cap it at the number of configured buckets
464 * (which is num_queues.)
465 */
466 queue_id = rss_get_indirection_to_bucket(i);
467 queue_id = queue_id % adapter->num_queues;
468 } else
469 queue_id = (j * index_mult);
470
471 /*
472 * The low 8 bits are for hash value (n+0);
473 * The next 8 bits are for hash value (n+1), etc.
474 */
475 reta = reta >> 8;
476 reta = reta | (((uint32_t) queue_id) << 24);
477 if ((i & 3) == 3) {
478 if (i < 128)
479 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
480 else
481 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
482 reta);
483 reta = 0;
484 }
485 }
486
487 /* Now fill our hash function seeds */
488 for (i = 0; i < 10; i++)
489 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
490
491 /* Perform hash on these packet types */
492 if (adapter->feat_en & IXGBE_FEATURE_RSS)
493 rss_hash_config = rss_gethashconfig();
494 else {
495 /*
496 * Disable UDP - IP fragments aren't currently being handled
497 * and so we end up with a mix of 2-tuple and 4-tuple
498 * traffic.
499 */
500 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
501 | RSS_HASHTYPE_RSS_TCP_IPV4
502 | RSS_HASHTYPE_RSS_IPV6
503 | RSS_HASHTYPE_RSS_TCP_IPV6
504 | RSS_HASHTYPE_RSS_IPV6_EX
505 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
506 }
507
508 mrqc = IXGBE_MRQC_RSSEN;
509 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
510 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
511 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
512 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
513 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
514 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
515 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
516 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
517 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
518 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
519 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
520 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
521 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
523 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
525 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
527 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
528 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
529 } /* ixgbe_initialize_rss_mapping */
530
531 /************************************************************************
532 * ixgbe_initialize_receive_units - Setup receive registers and features.
533 ************************************************************************/
534 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
535
536 static void
537 ixgbe_initialize_receive_units(struct adapter *adapter)
538 {
539 struct rx_ring *rxr = adapter->rx_rings;
540 struct ixgbe_hw *hw = &adapter->hw;
541 struct ifnet *ifp = adapter->ifp;
542 int i, j;
543 u32 bufsz, fctrl, srrctl, rxcsum;
544 u32 hlreg;
545
546 /*
547 * Make sure receives are disabled while
548 * setting up the descriptor ring
549 */
550 ixgbe_disable_rx(hw);
551
552 /* Enable broadcasts */
553 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
554 fctrl |= IXGBE_FCTRL_BAM;
555 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
556 fctrl |= IXGBE_FCTRL_DPF;
557 fctrl |= IXGBE_FCTRL_PMCF;
558 }
559 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
560
561 /* Set for Jumbo Frames? */
562 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
563 if (ifp->if_mtu > ETHERMTU)
564 hlreg |= IXGBE_HLREG0_JUMBOEN;
565 else
566 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
567
568 #ifdef DEV_NETMAP
569 /* CRC stripping is conditional in Netmap */
570 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
571 (ifp->if_capenable & IFCAP_NETMAP) &&
572 !ix_crcstrip)
573 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
574 else
575 #endif /* DEV_NETMAP */
576 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
577
578 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
579
580 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
581 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
582
583 for (i = 0; i < adapter->num_queues; i++, rxr++) {
584 u64 rdba = rxr->rxdma.dma_paddr;
585 u32 reg;
586 int regnum = i / 4; /* 1 register per 4 queues */
587 int regshift = i % 4; /* 4 bits per 1 queue */
588 j = rxr->me;
589
590 /* Setup the Base and Length of the Rx Descriptor Ring */
591 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
592 (rdba & 0x00000000ffffffffULL));
593 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
594 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
595 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
596
597 /* Set up the SRRCTL register */
598 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
599 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
600 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
601 srrctl |= bufsz;
602 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
603
604 /* Set RQSMR (Receive Queue Statistic Mapping) register */
605 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
606 reg &= ~(0x000000ff << (regshift * 8));
607 reg |= i << (regshift * 8);
608 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
609
610 /*
611 * Set DROP_EN iff we have no flow control and >1 queue.
612 * Note that srrctl was cleared shortly before during reset,
613 * so we do not need to clear the bit, but do it just in case
614 * this code is moved elsewhere.
615 */
616 if (adapter->num_queues > 1 &&
617 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
618 srrctl |= IXGBE_SRRCTL_DROP_EN;
619 } else {
620 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
621 }
622
623 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
624
625 /* Setup the HW Rx Head and Tail Descriptor Pointers */
626 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
627 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
628
629 /* Set the driver rx tail address */
630 rxr->tail = IXGBE_RDT(rxr->me);
631 }
632
633 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
634 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
635 | IXGBE_PSRTYPE_UDPHDR
636 | IXGBE_PSRTYPE_IPV4HDR
637 | IXGBE_PSRTYPE_IPV6HDR;
638 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
639 }
640
641 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
642
643 ixgbe_initialize_rss_mapping(adapter);
644
645 if (adapter->num_queues > 1) {
646 /* RSS and RX IPP Checksum are mutually exclusive */
647 rxcsum |= IXGBE_RXCSUM_PCSD;
648 }
649
650 if (ifp->if_capenable & IFCAP_RXCSUM)
651 rxcsum |= IXGBE_RXCSUM_PCSD;
652
653 /* This is useful for calculating UDP/IP fragment checksums */
654 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
655 rxcsum |= IXGBE_RXCSUM_IPPCSE;
656
657 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
658
659 } /* ixgbe_initialize_receive_units */
660
661 /************************************************************************
662 * ixgbe_initialize_transmit_units - Enable transmit units.
663 ************************************************************************/
664 static void
665 ixgbe_initialize_transmit_units(struct adapter *adapter)
666 {
667 struct tx_ring *txr = adapter->tx_rings;
668 struct ixgbe_hw *hw = &adapter->hw;
669 int i;
670
671 /* Setup the Base and Length of the Tx Descriptor Ring */
672 for (i = 0; i < adapter->num_queues; i++, txr++) {
673 u64 tdba = txr->txdma.dma_paddr;
674 u32 txctrl = 0;
675 u32 tqsmreg, reg;
676 int regnum = i / 4; /* 1 register per 4 queues */
677 int regshift = i % 4; /* 4 bits per 1 queue */
678 int j = txr->me;
679
680 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
681 (tdba & 0x00000000ffffffffULL));
682 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
683 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
684 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
685
686 /*
687 * Set TQSMR (Transmit Queue Statistic Mapping) register.
688 * Register location is different between 82598 and others.
689 */
690 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
691 tqsmreg = IXGBE_TQSMR(regnum);
692 else
693 tqsmreg = IXGBE_TQSM(regnum);
694 reg = IXGBE_READ_REG(hw, tqsmreg);
695 reg &= ~(0x000000ff << (regshift * 8));
696 reg |= i << (regshift * 8);
697 IXGBE_WRITE_REG(hw, tqsmreg, reg);
698
699 /* Setup the HW Tx Head and Tail descriptor pointers */
700 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
701 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
702
703 /* Cache the tail address */
704 txr->tail = IXGBE_TDT(j);
705
706 txr->txr_no_space = false;
707
708 /* Disable Head Writeback */
709 /*
710 * Note: for X550 series devices, these registers are actually
711 * prefixed with TPH_ isntead of DCA_, but the addresses and
712 * fields remain the same.
713 */
714 switch (hw->mac.type) {
715 case ixgbe_mac_82598EB:
716 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
717 break;
718 default:
719 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
720 break;
721 }
722 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
723 switch (hw->mac.type) {
724 case ixgbe_mac_82598EB:
725 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
726 break;
727 default:
728 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
729 break;
730 }
731
732 }
733
734 if (hw->mac.type != ixgbe_mac_82598EB) {
735 u32 dmatxctl, rttdcs;
736
737 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
738 dmatxctl |= IXGBE_DMATXCTL_TE;
739 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
740 /* Disable arbiter to set MTQC */
741 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
742 rttdcs |= IXGBE_RTTDCS_ARBDIS;
743 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
744 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
745 ixgbe_get_mtqc(adapter->iov_mode));
746 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
747 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
748 }
749
750 return;
751 } /* ixgbe_initialize_transmit_units */
752
753 /************************************************************************
754 * ixgbe_attach - Device initialization routine
755 *
756 * Called when the driver is being loaded.
757 * Identifies the type of hardware, allocates all resources
758 * and initializes the hardware.
759 *
760 * return 0 on success, positive on failure
761 ************************************************************************/
762 static void
763 ixgbe_attach(device_t parent, device_t dev, void *aux)
764 {
765 struct adapter *adapter;
766 struct ixgbe_hw *hw;
767 int error = -1;
768 u32 ctrl_ext;
769 u16 high, low, nvmreg;
770 pcireg_t id, subid;
771 const ixgbe_vendor_info_t *ent;
772 struct pci_attach_args *pa = aux;
773 const char *str;
774 char buf[256];
775
776 INIT_DEBUGOUT("ixgbe_attach: begin");
777
778 /* Allocate, clear, and link in our adapter structure */
779 adapter = device_private(dev);
780 adapter->hw.back = adapter;
781 adapter->dev = dev;
782 hw = &adapter->hw;
783 adapter->osdep.pc = pa->pa_pc;
784 adapter->osdep.tag = pa->pa_tag;
785 if (pci_dma64_available(pa))
786 adapter->osdep.dmat = pa->pa_dmat64;
787 else
788 adapter->osdep.dmat = pa->pa_dmat;
789 adapter->osdep.attached = false;
790
791 ent = ixgbe_lookup(pa);
792
793 KASSERT(ent != NULL);
794
795 aprint_normal(": %s, Version - %s\n",
796 ixgbe_strings[ent->index], ixgbe_driver_version);
797
798 /* Core Lock Init*/
799 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
800
801 /* Set up the timer callout */
802 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
803
804 /* Determine hardware revision */
805 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
806 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
807
808 hw->vendor_id = PCI_VENDOR(id);
809 hw->device_id = PCI_PRODUCT(id);
810 hw->revision_id =
811 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
812 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
813 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
814
815 /*
816 * Make sure BUSMASTER is set
817 */
818 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
819
820 /* Do base PCI setup - map BAR0 */
821 if (ixgbe_allocate_pci_resources(adapter, pa)) {
822 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
823 error = ENXIO;
824 goto err_out;
825 }
826
827 /* let hardware know driver is loaded */
828 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
829 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
830 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
831
832 /*
833 * Initialize the shared code
834 */
835 if (ixgbe_init_shared_code(hw) != 0) {
836 aprint_error_dev(dev, "Unable to initialize the shared code\n");
837 error = ENXIO;
838 goto err_out;
839 }
840
841 switch (hw->mac.type) {
842 case ixgbe_mac_82598EB:
843 str = "82598EB";
844 break;
845 case ixgbe_mac_82599EB:
846 str = "82599EB";
847 break;
848 case ixgbe_mac_X540:
849 str = "X540";
850 break;
851 case ixgbe_mac_X550:
852 str = "X550";
853 break;
854 case ixgbe_mac_X550EM_x:
855 str = "X550EM";
856 break;
857 case ixgbe_mac_X550EM_a:
858 str = "X550EM A";
859 break;
860 default:
861 str = "Unknown";
862 break;
863 }
864 aprint_normal_dev(dev, "device %s\n", str);
865
866 if (hw->mbx.ops.init_params)
867 hw->mbx.ops.init_params(hw);
868
869 hw->allow_unsupported_sfp = allow_unsupported_sfp;
870
871 /* Pick up the 82599 settings */
872 if (hw->mac.type != ixgbe_mac_82598EB) {
873 hw->phy.smart_speed = ixgbe_smart_speed;
874 adapter->num_segs = IXGBE_82599_SCATTER;
875 } else
876 adapter->num_segs = IXGBE_82598_SCATTER;
877
878 hw->mac.ops.set_lan_id(hw);
879 ixgbe_init_device_features(adapter);
880
881 if (ixgbe_configure_interrupts(adapter)) {
882 error = ENXIO;
883 goto err_out;
884 }
885
886 /* Allocate multicast array memory. */
887 adapter->mta = malloc(sizeof(*adapter->mta) *
888 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
889 if (adapter->mta == NULL) {
890 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
891 error = ENOMEM;
892 goto err_out;
893 }
894
895 /* Enable WoL (if supported) */
896 ixgbe_check_wol_support(adapter);
897
898 /* Verify adapter fan is still functional (if applicable) */
899 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
900 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
901 ixgbe_check_fan_failure(adapter, esdp, FALSE);
902 }
903
904 /* Ensure SW/FW semaphore is free */
905 ixgbe_init_swfw_semaphore(hw);
906
907 /* Enable EEE power saving */
908 if (adapter->feat_en & IXGBE_FEATURE_EEE)
909 hw->mac.ops.setup_eee(hw, TRUE);
910
911 /* Set an initial default flow control value */
912 hw->fc.requested_mode = ixgbe_flow_control;
913
914 /* Sysctls for limiting the amount of work done in the taskqueues */
915 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
916 "max number of rx packets to process",
917 &adapter->rx_process_limit, ixgbe_rx_process_limit);
918
919 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
920 "max number of tx packets to process",
921 &adapter->tx_process_limit, ixgbe_tx_process_limit);
922
923 /* Do descriptor calc and sanity checks */
924 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
925 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
926 aprint_error_dev(dev, "TXD config issue, using default!\n");
927 adapter->num_tx_desc = DEFAULT_TXD;
928 } else
929 adapter->num_tx_desc = ixgbe_txd;
930
931 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
932 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
933 aprint_error_dev(dev, "RXD config issue, using default!\n");
934 adapter->num_rx_desc = DEFAULT_RXD;
935 } else
936 adapter->num_rx_desc = ixgbe_rxd;
937
938 /* Allocate our TX/RX Queues */
939 if (ixgbe_allocate_queues(adapter)) {
940 error = ENOMEM;
941 goto err_out;
942 }
943
944 hw->phy.reset_if_overtemp = TRUE;
945 error = ixgbe_reset_hw(hw);
946 hw->phy.reset_if_overtemp = FALSE;
947 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
948 /*
949 * No optics in this port, set up
950 * so the timer routine will probe
951 * for later insertion.
952 */
953 adapter->sfp_probe = TRUE;
954 error = IXGBE_SUCCESS;
955 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
956 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
957 error = EIO;
958 goto err_late;
959 } else if (error) {
960 aprint_error_dev(dev, "Hardware initialization failed\n");
961 error = EIO;
962 goto err_late;
963 }
964
965 /* Make sure we have a good EEPROM before we read from it */
966 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
967 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
968 error = EIO;
969 goto err_late;
970 }
971
972 aprint_normal("%s:", device_xname(dev));
973 /* NVM Image Version */
974 switch (hw->mac.type) {
975 case ixgbe_mac_X540:
976 case ixgbe_mac_X550EM_a:
977 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
978 if (nvmreg == 0xffff)
979 break;
980 high = (nvmreg >> 12) & 0x0f;
981 low = (nvmreg >> 4) & 0xff;
982 id = nvmreg & 0x0f;
983 aprint_normal(" NVM Image Version %u.", high);
984 if (hw->mac.type == ixgbe_mac_X540)
985 str = "%x";
986 else
987 str = "%02x";
988 aprint_normal(str, low);
989 aprint_normal(" ID 0x%x,", id);
990 break;
991 case ixgbe_mac_X550EM_x:
992 case ixgbe_mac_X550:
993 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
994 if (nvmreg == 0xffff)
995 break;
996 high = (nvmreg >> 12) & 0x0f;
997 low = nvmreg & 0xff;
998 aprint_normal(" NVM Image Version %u.%02x,", high, low);
999 break;
1000 default:
1001 break;
1002 }
1003
1004 /* PHY firmware revision */
1005 switch (hw->mac.type) {
1006 case ixgbe_mac_X540:
1007 case ixgbe_mac_X550:
1008 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1009 if (nvmreg == 0xffff)
1010 break;
1011 high = (nvmreg >> 12) & 0x0f;
1012 low = (nvmreg >> 4) & 0xff;
1013 id = nvmreg & 0x000f;
1014 aprint_normal(" PHY FW Revision %u.", high);
1015 if (hw->mac.type == ixgbe_mac_X540)
1016 str = "%x";
1017 else
1018 str = "%02x";
1019 aprint_normal(str, low);
1020 aprint_normal(" ID 0x%x,", id);
1021 break;
1022 default:
1023 break;
1024 }
1025
1026 /* NVM Map version & OEM NVM Image version */
1027 switch (hw->mac.type) {
1028 case ixgbe_mac_X550:
1029 case ixgbe_mac_X550EM_x:
1030 case ixgbe_mac_X550EM_a:
1031 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1032 if (nvmreg != 0xffff) {
1033 high = (nvmreg >> 12) & 0x0f;
1034 low = nvmreg & 0x00ff;
1035 aprint_normal(" NVM Map version %u.%02x,", high, low);
1036 }
1037 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1038 if (nvmreg != 0xffff) {
1039 high = (nvmreg >> 12) & 0x0f;
1040 low = nvmreg & 0x00ff;
1041 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1042 low);
1043 }
1044 break;
1045 default:
1046 break;
1047 }
1048
1049 /* Print the ETrackID */
1050 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1051 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1052 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1053
1054 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1055 error = ixgbe_allocate_msix(adapter, pa);
1056 if (error) {
1057 /* Free allocated queue structures first */
1058 ixgbe_free_transmit_structures(adapter);
1059 ixgbe_free_receive_structures(adapter);
1060 free(adapter->queues, M_DEVBUF);
1061
1062 /* Fallback to legacy interrupt */
1063 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1064 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1065 adapter->feat_en |= IXGBE_FEATURE_MSI;
1066 adapter->num_queues = 1;
1067
1068 /* Allocate our TX/RX Queues again */
1069 if (ixgbe_allocate_queues(adapter)) {
1070 error = ENOMEM;
1071 goto err_out;
1072 }
1073 }
1074 }
1075 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1076 error = ixgbe_allocate_legacy(adapter, pa);
1077 if (error)
1078 goto err_late;
1079
1080 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1081 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1082 ixgbe_handle_link, adapter);
1083 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1084 ixgbe_handle_mod, adapter);
1085 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1086 ixgbe_handle_msf, adapter);
1087 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1088 ixgbe_handle_phy, adapter);
1089 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1090 adapter->fdir_si =
1091 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1092 ixgbe_reinit_fdir, adapter);
1093 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1094 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1095 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1096 && (adapter->fdir_si == NULL))) {
1097 aprint_error_dev(dev,
1098 "could not establish software interrupts ()\n");
1099 goto err_out;
1100 }
1101
1102 error = ixgbe_start_hw(hw);
1103 switch (error) {
1104 case IXGBE_ERR_EEPROM_VERSION:
1105 aprint_error_dev(dev, "This device is a pre-production adapter/"
1106 "LOM. Please be aware there may be issues associated "
1107 "with your hardware.\nIf you are experiencing problems "
1108 "please contact your Intel or hardware representative "
1109 "who provided you with this hardware.\n");
1110 break;
1111 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1112 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1113 error = EIO;
1114 goto err_late;
1115 case IXGBE_ERR_SFP_NOT_PRESENT:
1116 aprint_error_dev(dev, "No SFP+ Module found\n");
1117 /* falls thru */
1118 default:
1119 break;
1120 }
1121
1122 /* Setup OS specific network interface */
1123 if (ixgbe_setup_interface(dev, adapter) != 0)
1124 goto err_late;
1125
1126 /*
1127 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1128 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1129 */
1130 if (hw->phy.media_type == ixgbe_media_type_copper) {
1131 uint16_t id1, id2;
1132 int oui, model, rev;
1133 const char *descr;
1134
1135 id1 = hw->phy.id >> 16;
1136 id2 = hw->phy.id & 0xffff;
1137 oui = MII_OUI(id1, id2);
1138 model = MII_MODEL(id2);
1139 rev = MII_REV(id2);
1140 if ((descr = mii_get_descr(oui, model)) != NULL)
1141 aprint_normal_dev(dev,
1142 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1143 descr, oui, model, rev);
1144 else
1145 aprint_normal_dev(dev,
1146 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1147 oui, model, rev);
1148 }
1149
1150 /* Enable the optics for 82599 SFP+ fiber */
1151 ixgbe_enable_tx_laser(hw);
1152
1153 /* Enable power to the phy. */
1154 ixgbe_set_phy_power(hw, TRUE);
1155
1156 /* Initialize statistics */
1157 ixgbe_update_stats_counters(adapter);
1158
1159 /* Check PCIE slot type/speed/width */
1160 ixgbe_get_slot_info(adapter);
1161
1162 /*
1163 * Do time init and sysctl init here, but
1164 * only on the first port of a bypass adapter.
1165 */
1166 ixgbe_bypass_init(adapter);
1167
1168 /* Set an initial dmac value */
1169 adapter->dmac = 0;
1170 /* Set initial advertised speeds (if applicable) */
1171 adapter->advertise = ixgbe_get_advertise(adapter);
1172
1173 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1174 ixgbe_define_iov_schemas(dev, &error);
1175
1176 /* Add sysctls */
1177 ixgbe_add_device_sysctls(adapter);
1178 ixgbe_add_hw_stats(adapter);
1179
1180 /* For Netmap */
1181 adapter->init_locked = ixgbe_init_locked;
1182 adapter->stop_locked = ixgbe_stop;
1183
1184 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1185 ixgbe_netmap_attach(adapter);
1186
1187 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1188 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1189 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1190 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1191
1192 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1193 pmf_class_network_register(dev, adapter->ifp);
1194 else
1195 aprint_error_dev(dev, "couldn't establish power handler\n");
1196
1197 INIT_DEBUGOUT("ixgbe_attach: end");
1198 adapter->osdep.attached = true;
1199
1200 return;
1201
1202 err_late:
1203 ixgbe_free_transmit_structures(adapter);
1204 ixgbe_free_receive_structures(adapter);
1205 free(adapter->queues, M_DEVBUF);
1206 err_out:
1207 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1208 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1209 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1210 ixgbe_free_softint(adapter);
1211 ixgbe_free_pci_resources(adapter);
1212 if (adapter->mta != NULL)
1213 free(adapter->mta, M_DEVBUF);
1214 IXGBE_CORE_LOCK_DESTROY(adapter);
1215
1216 return;
1217 } /* ixgbe_attach */
1218
1219 /************************************************************************
1220 * ixgbe_check_wol_support
1221 *
1222 * Checks whether the adapter's ports are capable of
1223 * Wake On LAN by reading the adapter's NVM.
1224 *
1225 * Sets each port's hw->wol_enabled value depending
1226 * on the value read here.
1227 ************************************************************************/
1228 static void
1229 ixgbe_check_wol_support(struct adapter *adapter)
1230 {
1231 struct ixgbe_hw *hw = &adapter->hw;
1232 u16 dev_caps = 0;
1233
1234 /* Find out WoL support for port */
1235 adapter->wol_support = hw->wol_enabled = 0;
1236 ixgbe_get_device_caps(hw, &dev_caps);
1237 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1238 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1239 hw->bus.func == 0))
1240 adapter->wol_support = hw->wol_enabled = 1;
1241
1242 /* Save initial wake up filter configuration */
1243 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1244
1245 return;
1246 } /* ixgbe_check_wol_support */
1247
1248 /************************************************************************
1249 * ixgbe_setup_interface
1250 *
1251 * Setup networking device structure and register an interface.
1252 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; fill in its methods */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit entry point unless legacy TX was requested */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* -2 keeps headroom so a full send queue never fills the ring */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach ordering below matters: if_initialize must precede
	 * if_percpuq_create/ether_ifattach, and if_register comes last.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	/* Capabilities advertised but none enabled by default here */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
1356
1357 /************************************************************************
1358 * ixgbe_add_media_types
1359 ************************************************************************/
1360 static void
1361 ixgbe_add_media_types(struct adapter *adapter)
1362 {
1363 struct ixgbe_hw *hw = &adapter->hw;
1364 device_t dev = adapter->dev;
1365 u64 layer;
1366
1367 layer = adapter->phy_layer;
1368
1369 #define ADD(mm, dd) \
1370 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1371
1372 ADD(IFM_NONE, 0);
1373
1374 /* Media types with matching NetBSD media defines */
1375 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1376 ADD(IFM_10G_T | IFM_FDX, 0);
1377 }
1378 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1379 ADD(IFM_1000_T | IFM_FDX, 0);
1380 }
1381 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1382 ADD(IFM_100_TX | IFM_FDX, 0);
1383 }
1384 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1385 ADD(IFM_10_T | IFM_FDX, 0);
1386 }
1387
1388 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1389 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1390 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1391 }
1392
1393 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1394 ADD(IFM_10G_LR | IFM_FDX, 0);
1395 if (hw->phy.multispeed_fiber) {
1396 ADD(IFM_1000_LX | IFM_FDX, 0);
1397 }
1398 }
1399 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1400 ADD(IFM_10G_SR | IFM_FDX, 0);
1401 if (hw->phy.multispeed_fiber) {
1402 ADD(IFM_1000_SX | IFM_FDX, 0);
1403 }
1404 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1405 ADD(IFM_1000_SX | IFM_FDX, 0);
1406 }
1407 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1408 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1409 }
1410
1411 #ifdef IFM_ETH_XTYPE
1412 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1413 ADD(IFM_10G_KR | IFM_FDX, 0);
1414 }
1415 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1416 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1417 }
1418 #else
1419 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1420 device_printf(dev, "Media supported: 10GbaseKR\n");
1421 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1422 ADD(IFM_10G_SR | IFM_FDX, 0);
1423 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1425 device_printf(dev, "Media supported: 10GbaseKX4\n");
1426 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1427 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1428 }
1429 #endif
1430 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1431 ADD(IFM_1000_KX | IFM_FDX, 0);
1432 }
1433 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1434 ADD(IFM_2500_KX | IFM_FDX, 0);
1435 }
1436 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1437 ADD(IFM_2500_T | IFM_FDX, 0);
1438 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1440 ADD(IFM_5000_T | IFM_FDX, 0);
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1443 device_printf(dev, "Media supported: 1000baseBX\n");
1444 /* XXX no ifmedia_set? */
1445
1446 ADD(IFM_AUTO, 0);
1447
1448 #undef ADD
1449 } /* ixgbe_add_media_types */
1450
1451 /************************************************************************
1452 * ixgbe_is_sfp
1453 ************************************************************************/
1454 static inline bool
1455 ixgbe_is_sfp(struct ixgbe_hw *hw)
1456 {
1457 switch (hw->mac.type) {
1458 case ixgbe_mac_82598EB:
1459 if (hw->phy.type == ixgbe_phy_nl)
1460 return (TRUE);
1461 return (FALSE);
1462 case ixgbe_mac_82599EB:
1463 switch (hw->mac.ops.get_media_type(hw)) {
1464 case ixgbe_media_type_fiber:
1465 case ixgbe_media_type_fiber_qsfp:
1466 return (TRUE);
1467 default:
1468 return (FALSE);
1469 }
1470 case ixgbe_mac_X550EM_x:
1471 case ixgbe_mac_X550EM_a:
1472 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1473 return (TRUE);
1474 return (FALSE);
1475 default:
1476 return (FALSE);
1477 }
1478 } /* ixgbe_is_sfp */
1479
1480 /************************************************************************
1481 * ixgbe_config_link
1482 ************************************************************************/
1483 static void
1484 ixgbe_config_link(struct adapter *adapter)
1485 {
1486 struct ixgbe_hw *hw = &adapter->hw;
1487 u32 autoneg, err = 0;
1488 bool sfp, negotiate = false;
1489
1490 sfp = ixgbe_is_sfp(hw);
1491
1492 if (sfp) {
1493 if (hw->phy.multispeed_fiber) {
1494 ixgbe_enable_tx_laser(hw);
1495 kpreempt_disable();
1496 softint_schedule(adapter->msf_si);
1497 kpreempt_enable();
1498 }
1499 kpreempt_disable();
1500 softint_schedule(adapter->mod_si);
1501 kpreempt_enable();
1502 } else {
1503 struct ifmedia *ifm = &adapter->media;
1504
1505 if (hw->mac.ops.check_link)
1506 err = ixgbe_check_link(hw, &adapter->link_speed,
1507 &adapter->link_up, FALSE);
1508 if (err)
1509 return;
1510
1511 /*
1512 * Check if it's the first call. If it's the first call,
1513 * get value for auto negotiation.
1514 */
1515 autoneg = hw->phy.autoneg_advertised;
1516 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1517 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1518 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1519 &negotiate);
1520 if (err)
1521 return;
1522 if (hw->mac.ops.setup_link)
1523 err = hw->mac.ops.setup_link(hw, autoneg,
1524 adapter->link_up);
1525 }
1526
1527 } /* ixgbe_config_link */
1528
1529 /************************************************************************
1530 * ixgbe_update_stats_counters - Update board statistics counters.
1531 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Basic receive error counters */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC is read only on X550 here */
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		/* Fold the 16 per-register values onto the active queues */
		int j = i % adapter->num_queues;

		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* QPRDC (queue Rx drop count) only exists on 82599 and later */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers */
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		/* RNBC (Rx no-buffer count) exists only on 82598 */
		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[j].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* The priority-pause Rx counters use different regs on 82599+ */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx is never updated in this function, so
	 * the subtraction below is currently a no-op -- confirm whether
	 * per-queue missed counts were meant to feed it (they do in the
	 * upstream FreeBSD driver).
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level flow-control frame counters */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	/* total pause frames sent; used to back pause bytes out of gotc */
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters are split across L/H register pairs */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Rx packets binned by size */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Tx counters; pause frames (total) are backed out of these */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	/* Remaining Rx error, management, and Tx size-binned counters */
	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1693
1694 /************************************************************************
1695 * ixgbe_add_hw_stats
1696 *
1697 * Add sysctl variables, one per statistic, to the system.
1698 ************************************************************************/
1699 static void
1700 ixgbe_add_hw_stats(struct adapter *adapter)
1701 {
1702 device_t dev = adapter->dev;
1703 const struct sysctlnode *rnode, *cnode;
1704 struct sysctllog **log = &adapter->sysctllog;
1705 struct tx_ring *txr = adapter->tx_rings;
1706 struct rx_ring *rxr = adapter->rx_rings;
1707 struct ixgbe_hw *hw = &adapter->hw;
1708 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1709 const char *xname = device_xname(dev);
1710 int i;
1711
1712 /* Driver Statistics */
1713 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1714 NULL, xname, "Driver tx dma soft fail EFBIG");
1715 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1716 NULL, xname, "m_defrag() failed");
1717 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1718 NULL, xname, "Driver tx dma hard fail EFBIG");
1719 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1720 NULL, xname, "Driver tx dma hard fail EINVAL");
1721 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1722 NULL, xname, "Driver tx dma hard fail other");
1723 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1724 NULL, xname, "Driver tx dma soft fail EAGAIN");
1725 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1726 NULL, xname, "Driver tx dma soft fail ENOMEM");
1727 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1728 NULL, xname, "Watchdog timeouts");
1729 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1730 NULL, xname, "TSO errors");
1731 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1732 NULL, xname, "Link MSI-X IRQ Handled");
1733 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1734 NULL, xname, "Link softint");
1735 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1736 NULL, xname, "module softint");
1737 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1738 NULL, xname, "multimode softint");
1739 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1740 NULL, xname, "external PHY softint");
1741
1742 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1743 #ifdef LRO
1744 struct lro_ctrl *lro = &rxr->lro;
1745 #endif /* LRO */
1746
1747 snprintf(adapter->queues[i].evnamebuf,
1748 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1749 xname, i);
1750 snprintf(adapter->queues[i].namebuf,
1751 sizeof(adapter->queues[i].namebuf), "q%d", i);
1752
1753 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1754 aprint_error_dev(dev, "could not create sysctl root\n");
1755 break;
1756 }
1757
1758 if (sysctl_createv(log, 0, &rnode, &rnode,
1759 0, CTLTYPE_NODE,
1760 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1761 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1762 break;
1763
1764 if (sysctl_createv(log, 0, &rnode, &cnode,
1765 CTLFLAG_READWRITE, CTLTYPE_INT,
1766 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1767 ixgbe_sysctl_interrupt_rate_handler, 0,
1768 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1769 break;
1770
1771 if (sysctl_createv(log, 0, &rnode, &cnode,
1772 CTLFLAG_READONLY, CTLTYPE_INT,
1773 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1774 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1775 0, CTL_CREATE, CTL_EOL) != 0)
1776 break;
1777
1778 if (sysctl_createv(log, 0, &rnode, &cnode,
1779 CTLFLAG_READONLY, CTLTYPE_INT,
1780 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1781 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1782 0, CTL_CREATE, CTL_EOL) != 0)
1783 break;
1784
1785 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1786 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1787 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1788 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1789 "Handled queue in softint");
1790 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1791 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1792 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1793 NULL, adapter->queues[i].evnamebuf, "TSO");
1794 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1795 NULL, adapter->queues[i].evnamebuf,
1796 "Queue No Descriptor Available");
1797 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1798 NULL, adapter->queues[i].evnamebuf,
1799 "Queue Packets Transmitted");
1800 #ifndef IXGBE_LEGACY_TX
1801 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1802 NULL, adapter->queues[i].evnamebuf,
1803 "Packets dropped in pcq");
1804 #endif
1805
1806 if (sysctl_createv(log, 0, &rnode, &cnode,
1807 CTLFLAG_READONLY,
1808 CTLTYPE_INT,
1809 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1810 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1811 CTL_CREATE, CTL_EOL) != 0)
1812 break;
1813
1814 if (sysctl_createv(log, 0, &rnode, &cnode,
1815 CTLFLAG_READONLY,
1816 CTLTYPE_INT,
1817 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1818 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1819 CTL_CREATE, CTL_EOL) != 0)
1820 break;
1821
1822 if (sysctl_createv(log, 0, &rnode, &cnode,
1823 CTLFLAG_READONLY,
1824 CTLTYPE_INT,
1825 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1826 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1827 CTL_CREATE, CTL_EOL) != 0)
1828 break;
1829
1830 if (i < __arraycount(stats->mpc)) {
1831 evcnt_attach_dynamic(&stats->mpc[i],
1832 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1833 "RX Missed Packet Count");
1834 if (hw->mac.type == ixgbe_mac_82598EB)
1835 evcnt_attach_dynamic(&stats->rnbc[i],
1836 EVCNT_TYPE_MISC, NULL,
1837 adapter->queues[i].evnamebuf,
1838 "Receive No Buffers");
1839 }
1840 if (i < __arraycount(stats->pxontxc)) {
1841 evcnt_attach_dynamic(&stats->pxontxc[i],
1842 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1843 "pxontxc");
1844 evcnt_attach_dynamic(&stats->pxonrxc[i],
1845 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1846 "pxonrxc");
1847 evcnt_attach_dynamic(&stats->pxofftxc[i],
1848 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1849 "pxofftxc");
1850 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1851 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1852 "pxoffrxc");
1853 if (hw->mac.type >= ixgbe_mac_82599EB)
1854 evcnt_attach_dynamic(&stats->pxon2offc[i],
1855 EVCNT_TYPE_MISC, NULL,
1856 adapter->queues[i].evnamebuf,
1857 "pxon2offc");
1858 }
1859 if (i < __arraycount(stats->qprc)) {
1860 evcnt_attach_dynamic(&stats->qprc[i],
1861 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1862 "qprc");
1863 evcnt_attach_dynamic(&stats->qptc[i],
1864 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1865 "qptc");
1866 evcnt_attach_dynamic(&stats->qbrc[i],
1867 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1868 "qbrc");
1869 evcnt_attach_dynamic(&stats->qbtc[i],
1870 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1871 "qbtc");
1872 if (hw->mac.type >= ixgbe_mac_82599EB)
1873 evcnt_attach_dynamic(&stats->qprdc[i],
1874 EVCNT_TYPE_MISC, NULL,
1875 adapter->queues[i].evnamebuf, "qprdc");
1876 }
1877
1878 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1879 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1880 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1881 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1882 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1883 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1884 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1885 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1886 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1887 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1888 #ifdef LRO
1889 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1890 CTLFLAG_RD, &lro->lro_queued, 0,
1891 "LRO Queued");
1892 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1893 CTLFLAG_RD, &lro->lro_flushed, 0,
1894 "LRO Flushed");
1895 #endif /* LRO */
1896 }
1897
1898 /* MAC stats get their own sub node */
1899
1900 snprintf(stats->namebuf,
1901 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1902
1903 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1904 stats->namebuf, "rx csum offload - IP");
1905 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1906 stats->namebuf, "rx csum offload - L4");
1907 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1908 stats->namebuf, "rx csum offload - IP bad");
1909 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1910 stats->namebuf, "rx csum offload - L4 bad");
1911 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1912 stats->namebuf, "Interrupt conditions zero");
1913 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1914 stats->namebuf, "Legacy interrupts");
1915
1916 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1917 stats->namebuf, "CRC Errors");
1918 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1919 stats->namebuf, "Illegal Byte Errors");
1920 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1921 stats->namebuf, "Byte Errors");
1922 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1923 stats->namebuf, "MAC Short Packets Discarded");
1924 if (hw->mac.type >= ixgbe_mac_X550)
1925 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1926 stats->namebuf, "Bad SFD");
1927 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1928 stats->namebuf, "Total Packets Missed");
1929 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1930 stats->namebuf, "MAC Local Faults");
1931 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1932 stats->namebuf, "MAC Remote Faults");
1933 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1934 stats->namebuf, "Receive Length Errors");
1935 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1936 stats->namebuf, "Link XON Transmitted");
1937 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "Link XON Received");
1939 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "Link XOFF Transmitted");
1941 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "Link XOFF Received");
1943
1944 /* Packet Reception Stats */
1945 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "Total Octets Received");
1947 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Good Octets Received");
1949 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "Total Packets Received");
1951 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1952 stats->namebuf, "Good Packets Received");
1953 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "Multicast Packets Received");
1955 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Broadcast Packets Received");
1957 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "64 byte frames received ");
1959 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "65-127 byte frames received");
1961 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "128-255 byte frames received");
1963 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "256-511 byte frames received");
1965 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "512-1023 byte frames received");
1967 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "1023-1522 byte frames received");
1969 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Receive Undersized");
1971 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "Fragmented Packets Received ");
1973 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Oversized Packets Received");
1975 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Received Jabber");
1977 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Management Packets Received");
1979 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Management Packets Dropped");
1981 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Checksum Errors");
1983
1984 /* Packet Transmission Stats */
1985 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Good Octets Transmitted");
1987 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Total Packets Transmitted");
1989 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Good Packets Transmitted");
1991 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Broadcast Packets Transmitted");
1993 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "Multicast Packets Transmitted");
1995 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "Management Packets Transmitted");
1997 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "64 byte frames transmitted ");
1999 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "65-127 byte frames transmitted");
2001 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "128-255 byte frames transmitted");
2003 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "256-511 byte frames transmitted");
2005 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "512-1023 byte frames transmitted");
2007 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "1024-1522 byte frames transmitted");
2009 } /* ixgbe_add_hw_stats */
2010
2011 static void
2012 ixgbe_clear_evcnt(struct adapter *adapter)
2013 {
2014 struct tx_ring *txr = adapter->tx_rings;
2015 struct rx_ring *rxr = adapter->rx_rings;
2016 struct ixgbe_hw *hw = &adapter->hw;
2017 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2018
2019 adapter->efbig_tx_dma_setup.ev_count = 0;
2020 adapter->mbuf_defrag_failed.ev_count = 0;
2021 adapter->efbig2_tx_dma_setup.ev_count = 0;
2022 adapter->einval_tx_dma_setup.ev_count = 0;
2023 adapter->other_tx_dma_setup.ev_count = 0;
2024 adapter->eagain_tx_dma_setup.ev_count = 0;
2025 adapter->enomem_tx_dma_setup.ev_count = 0;
2026 adapter->tso_err.ev_count = 0;
2027 adapter->watchdog_events.ev_count = 0;
2028 adapter->link_irq.ev_count = 0;
2029 adapter->link_sicount.ev_count = 0;
2030 adapter->mod_sicount.ev_count = 0;
2031 adapter->msf_sicount.ev_count = 0;
2032 adapter->phy_sicount.ev_count = 0;
2033
2034 txr = adapter->tx_rings;
2035 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2036 adapter->queues[i].irqs.ev_count = 0;
2037 adapter->queues[i].handleq.ev_count = 0;
2038 adapter->queues[i].req.ev_count = 0;
2039 txr->no_desc_avail.ev_count = 0;
2040 txr->total_packets.ev_count = 0;
2041 txr->tso_tx.ev_count = 0;
2042 #ifndef IXGBE_LEGACY_TX
2043 txr->pcq_drops.ev_count = 0;
2044 #endif
2045 txr->q_efbig_tx_dma_setup = 0;
2046 txr->q_mbuf_defrag_failed = 0;
2047 txr->q_efbig2_tx_dma_setup = 0;
2048 txr->q_einval_tx_dma_setup = 0;
2049 txr->q_other_tx_dma_setup = 0;
2050 txr->q_eagain_tx_dma_setup = 0;
2051 txr->q_enomem_tx_dma_setup = 0;
2052 txr->q_tso_err = 0;
2053
2054 if (i < __arraycount(stats->mpc)) {
2055 stats->mpc[i].ev_count = 0;
2056 if (hw->mac.type == ixgbe_mac_82598EB)
2057 stats->rnbc[i].ev_count = 0;
2058 }
2059 if (i < __arraycount(stats->pxontxc)) {
2060 stats->pxontxc[i].ev_count = 0;
2061 stats->pxonrxc[i].ev_count = 0;
2062 stats->pxofftxc[i].ev_count = 0;
2063 stats->pxoffrxc[i].ev_count = 0;
2064 if (hw->mac.type >= ixgbe_mac_82599EB)
2065 stats->pxon2offc[i].ev_count = 0;
2066 }
2067 if (i < __arraycount(stats->qprc)) {
2068 stats->qprc[i].ev_count = 0;
2069 stats->qptc[i].ev_count = 0;
2070 stats->qbrc[i].ev_count = 0;
2071 stats->qbtc[i].ev_count = 0;
2072 if (hw->mac.type >= ixgbe_mac_82599EB)
2073 stats->qprdc[i].ev_count = 0;
2074 }
2075
2076 rxr->rx_packets.ev_count = 0;
2077 rxr->rx_bytes.ev_count = 0;
2078 rxr->rx_copies.ev_count = 0;
2079 rxr->no_jmbuf.ev_count = 0;
2080 rxr->rx_discarded.ev_count = 0;
2081 }
2082 stats->ipcs.ev_count = 0;
2083 stats->l4cs.ev_count = 0;
2084 stats->ipcs_bad.ev_count = 0;
2085 stats->l4cs_bad.ev_count = 0;
2086 stats->intzero.ev_count = 0;
2087 stats->legint.ev_count = 0;
2088 stats->crcerrs.ev_count = 0;
2089 stats->illerrc.ev_count = 0;
2090 stats->errbc.ev_count = 0;
2091 stats->mspdc.ev_count = 0;
2092 stats->mbsdc.ev_count = 0;
2093 stats->mpctotal.ev_count = 0;
2094 stats->mlfc.ev_count = 0;
2095 stats->mrfc.ev_count = 0;
2096 stats->rlec.ev_count = 0;
2097 stats->lxontxc.ev_count = 0;
2098 stats->lxonrxc.ev_count = 0;
2099 stats->lxofftxc.ev_count = 0;
2100 stats->lxoffrxc.ev_count = 0;
2101
2102 /* Packet Reception Stats */
2103 stats->tor.ev_count = 0;
2104 stats->gorc.ev_count = 0;
2105 stats->tpr.ev_count = 0;
2106 stats->gprc.ev_count = 0;
2107 stats->mprc.ev_count = 0;
2108 stats->bprc.ev_count = 0;
2109 stats->prc64.ev_count = 0;
2110 stats->prc127.ev_count = 0;
2111 stats->prc255.ev_count = 0;
2112 stats->prc511.ev_count = 0;
2113 stats->prc1023.ev_count = 0;
2114 stats->prc1522.ev_count = 0;
2115 stats->ruc.ev_count = 0;
2116 stats->rfc.ev_count = 0;
2117 stats->roc.ev_count = 0;
2118 stats->rjc.ev_count = 0;
2119 stats->mngprc.ev_count = 0;
2120 stats->mngpdc.ev_count = 0;
2121 stats->xec.ev_count = 0;
2122
2123 /* Packet Transmission Stats */
2124 stats->gotc.ev_count = 0;
2125 stats->tpt.ev_count = 0;
2126 stats->gptc.ev_count = 0;
2127 stats->bptc.ev_count = 0;
2128 stats->mptc.ev_count = 0;
2129 stats->mngptc.ev_count = 0;
2130 stats->ptc64.ev_count = 0;
2131 stats->ptc127.ev_count = 0;
2132 stats->ptc255.ev_count = 0;
2133 stats->ptc511.ev_count = 0;
2134 stats->ptc1023.ev_count = 0;
2135 stats->ptc1522.ev_count = 0;
2136 }
2137
2138 /************************************************************************
2139 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2140 *
2141 * Retrieves the TDH value from the hardware
2142 ************************************************************************/
2143 static int
2144 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2145 {
2146 struct sysctlnode node = *rnode;
2147 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2148 uint32_t val;
2149
2150 if (!txr)
2151 return (0);
2152
2153 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2154 node.sysctl_data = &val;
2155 return sysctl_lookup(SYSCTLFN_CALL(&node));
2156 } /* ixgbe_sysctl_tdh_handler */
2157
2158 /************************************************************************
2159 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2160 *
2161 * Retrieves the TDT value from the hardware
2162 ************************************************************************/
2163 static int
2164 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2165 {
2166 struct sysctlnode node = *rnode;
2167 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2168 uint32_t val;
2169
2170 if (!txr)
2171 return (0);
2172
2173 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2174 node.sysctl_data = &val;
2175 return sysctl_lookup(SYSCTLFN_CALL(&node));
2176 } /* ixgbe_sysctl_tdt_handler */
2177
2178 /************************************************************************
2179 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2180 * handler function
2181 *
2182 * Retrieves the next_to_check value
2183 ************************************************************************/
2184 static int
2185 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2186 {
2187 struct sysctlnode node = *rnode;
2188 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2189 uint32_t val;
2190
2191 if (!rxr)
2192 return (0);
2193
2194 val = rxr->next_to_check;
2195 node.sysctl_data = &val;
2196 return sysctl_lookup(SYSCTLFN_CALL(&node));
2197 } /* ixgbe_sysctl_next_to_check_handler */
2198
2199 /************************************************************************
2200 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2201 *
2202 * Retrieves the RDH value from the hardware
2203 ************************************************************************/
2204 static int
2205 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2206 {
2207 struct sysctlnode node = *rnode;
2208 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2209 uint32_t val;
2210
2211 if (!rxr)
2212 return (0);
2213
2214 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2215 node.sysctl_data = &val;
2216 return sysctl_lookup(SYSCTLFN_CALL(&node));
2217 } /* ixgbe_sysctl_rdh_handler */
2218
2219 /************************************************************************
2220 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2221 *
2222 * Retrieves the RDT value from the hardware
2223 ************************************************************************/
2224 static int
2225 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2226 {
2227 struct sysctlnode node = *rnode;
2228 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2229 uint32_t val;
2230
2231 if (!rxr)
2232 return (0);
2233
2234 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2235 node.sysctl_data = &val;
2236 return sysctl_lookup(SYSCTLFN_CALL(&node));
2237 } /* ixgbe_sysctl_rdt_handler */
2238
2239 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2240 /************************************************************************
2241 * ixgbe_register_vlan
2242 *
2243 * Run via vlan config EVENT, it enables us to use the
2244 * HW Filter table since we can get the vlan id. This
2245 * just creates the entry in the soft version of the
2246 * VFTA, init will repopulate the real table.
2247 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * The shadow VFTA is an array of 32-bit words: bits 11:5 of the
	 * VLAN id select the word, bits 4:0 select the bit within it.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-run VLAN HW setup so the soft VFTA reaches the hardware. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
2267
2268 /************************************************************************
2269 * ixgbe_unregister_vlan
2270 *
2271 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2272 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit decomposition as ixgbe_register_vlan(). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
2293 #endif
2294
2295 static void
2296 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2297 {
2298 struct ethercom *ec = &adapter->osdep.ec;
2299 struct ixgbe_hw *hw = &adapter->hw;
2300 struct rx_ring *rxr;
2301 int i;
2302 u32 ctrl;
2303
2304
2305 /*
2306 * We get here thru init_locked, meaning
2307 * a soft reset, this has already cleared
2308 * the VFTA and other state, so if there
2309 * have been no vlan's registered do nothing.
2310 */
2311 if (!VLAN_ATTACHED(&adapter->osdep.ec))
2312 return;
2313
2314 /* Setup the queues for vlans */
2315 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
2316 for (i = 0; i < adapter->num_queues; i++) {
2317 rxr = &adapter->rx_rings[i];
2318 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2319 if (hw->mac.type != ixgbe_mac_82598EB) {
2320 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2321 ctrl |= IXGBE_RXDCTL_VME;
2322 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2323 }
2324 rxr->vtag_strip = TRUE;
2325 }
2326 }
2327
2328 if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
2329 return;
2330 /*
2331 * A soft reset zero's out the VFTA, so
2332 * we need to repopulate it now.
2333 */
2334 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2335 if (adapter->shadow_vfta[i] != 0)
2336 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2337 adapter->shadow_vfta[i]);
2338
2339 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2340 /* Enable the Filter Table if enabled */
2341 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
2342 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2343 ctrl |= IXGBE_VLNCTRL_VFE;
2344 }
2345 if (hw->mac.type == ixgbe_mac_82598EB)
2346 ctrl |= IXGBE_VLNCTRL_VME;
2347 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2348 } /* ixgbe_setup_vlan_hw_support */
2349
2350 /************************************************************************
2351 * ixgbe_get_slot_info
2352 *
2353 * Get the width and transaction speed of
2354 * the slot this adapter is plugged into.
2355 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* Walk two levels up the autoconf tree from the NIC device. */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	/*
	 * NOTE(review): the capability lookup uses the NIC's own
	 * osdep.pc/tag, not the parent bridge's — presumably intended
	 * since the bridge's pcitag isn't available here; confirm.
	 */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	/* LCSR's upper 16 bits hold the Link Status Register proper. */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot cannot feed the NIC at full rate. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2457
2458 /************************************************************************
2459 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2460 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	/*
	 * disabled_count is a nesting count for disable/enable pairs;
	 * only unmask the interrupt once the count drops to zero.
	 */
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: all queue bits live in the single EIMS register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs: 64 queue bits split across EIMS_EX[0] and [1] */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
2487
2488 /************************************************************************
2489 * ixgbe_disable_queue_internal
2490 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = (u64)(1ULL << vector);
	u32 mask;

	mutex_enter(&que->dc_mtx);

	/*
	 * Already disabled: just bump the nesting count when nesting is
	 * allowed, and skip the register write either way.
	 */
	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: all queue bits live in the single EIMC register */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs: 64 queue bits split across EIMC_EX[0] and [1] */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
2522
2523 /************************************************************************
2524 * ixgbe_disable_queue
2525 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/* Nestable variant: each call pairs with an ixgbe_enable_queue(). */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
2532
2533 /************************************************************************
2534 * ixgbe_sched_handle_que - schedule deferred packet processing
2535 ************************************************************************/
2536 static inline void
2537 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2538 {
2539
2540 if(que->txrx_use_workqueue) {
2541 /*
2542 * adapter->que_wq is bound to each CPU instead of
2543 * each NIC queue to reduce workqueue kthread. As we
2544 * should consider about interrupt affinity in this
2545 * function, the workqueue kthread must be WQ_PERCPU.
2546 * If create WQ_PERCPU workqueue kthread for each NIC
2547 * queue, that number of created workqueue kthread is
2548 * (number of used NIC queue) * (number of CPUs) =
2549 * (number of CPUs) ^ 2 most often.
2550 *
2551 * The same NIC queue's interrupts are avoided by
2552 * masking the queue's interrupt. And different
2553 * NIC queue's interrupts use different struct work
2554 * (que->wq_cookie). So, "enqueued flag" to avoid
2555 * twice workqueue_enqueue() is not required .
2556 */
2557 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2558 } else {
2559 softint_schedule(que->que_si);
2560 }
2561 }
2562
2563 /************************************************************************
2564 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2565 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until deferred processing re-enables it */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Base the new ITR on the larger average frame size of TX and RX */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Either defer more work or re-enable this queue's interrupt */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2664
2665 /************************************************************************
2666 * ixgbe_media_status - Media Ioctl callback
2667 *
2668 * Called whenever the user queries the status of
2669 * the interface using ifconfig.
2670 ************************************************************************/
2671 static void
2672 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2673 {
2674 struct adapter *adapter = ifp->if_softc;
2675 struct ixgbe_hw *hw = &adapter->hw;
2676 int layer;
2677
2678 INIT_DEBUGOUT("ixgbe_media_status: begin");
2679 IXGBE_CORE_LOCK(adapter);
2680 ixgbe_update_link_status(adapter);
2681
2682 ifmr->ifm_status = IFM_AVALID;
2683 ifmr->ifm_active = IFM_ETHER;
2684
2685 if (!adapter->link_active) {
2686 ifmr->ifm_active |= IFM_NONE;
2687 IXGBE_CORE_UNLOCK(adapter);
2688 return;
2689 }
2690
2691 ifmr->ifm_status |= IFM_ACTIVE;
2692 layer = adapter->phy_layer;
2693
2694 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2695 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2696 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2697 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2698 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2699 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2700 switch (adapter->link_speed) {
2701 case IXGBE_LINK_SPEED_10GB_FULL:
2702 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2703 break;
2704 case IXGBE_LINK_SPEED_5GB_FULL:
2705 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2706 break;
2707 case IXGBE_LINK_SPEED_2_5GB_FULL:
2708 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2709 break;
2710 case IXGBE_LINK_SPEED_1GB_FULL:
2711 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2712 break;
2713 case IXGBE_LINK_SPEED_100_FULL:
2714 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2715 break;
2716 case IXGBE_LINK_SPEED_10_FULL:
2717 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2718 break;
2719 }
2720 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2721 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2722 switch (adapter->link_speed) {
2723 case IXGBE_LINK_SPEED_10GB_FULL:
2724 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2725 break;
2726 }
2727 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2728 switch (adapter->link_speed) {
2729 case IXGBE_LINK_SPEED_10GB_FULL:
2730 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2731 break;
2732 case IXGBE_LINK_SPEED_1GB_FULL:
2733 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2734 break;
2735 }
2736 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2737 switch (adapter->link_speed) {
2738 case IXGBE_LINK_SPEED_10GB_FULL:
2739 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2740 break;
2741 case IXGBE_LINK_SPEED_1GB_FULL:
2742 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2743 break;
2744 }
2745 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2746 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2747 switch (adapter->link_speed) {
2748 case IXGBE_LINK_SPEED_10GB_FULL:
2749 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2750 break;
2751 case IXGBE_LINK_SPEED_1GB_FULL:
2752 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2753 break;
2754 }
2755 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2756 switch (adapter->link_speed) {
2757 case IXGBE_LINK_SPEED_10GB_FULL:
2758 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2759 break;
2760 }
2761 /*
2762 * XXX: These need to use the proper media types once
2763 * they're added.
2764 */
2765 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2766 switch (adapter->link_speed) {
2767 case IXGBE_LINK_SPEED_10GB_FULL:
2768 #ifndef IFM_ETH_XTYPE
2769 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2770 #else
2771 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2772 #endif
2773 break;
2774 case IXGBE_LINK_SPEED_2_5GB_FULL:
2775 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2776 break;
2777 case IXGBE_LINK_SPEED_1GB_FULL:
2778 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2779 break;
2780 }
2781 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2782 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2783 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2784 switch (adapter->link_speed) {
2785 case IXGBE_LINK_SPEED_10GB_FULL:
2786 #ifndef IFM_ETH_XTYPE
2787 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2788 #else
2789 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2790 #endif
2791 break;
2792 case IXGBE_LINK_SPEED_2_5GB_FULL:
2793 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2794 break;
2795 case IXGBE_LINK_SPEED_1GB_FULL:
2796 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2797 break;
2798 }
2799
2800 /* If nothing is recognized... */
2801 #if 0
2802 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2803 ifmr->ifm_active |= IFM_UNKNOWN;
2804 #endif
2805
2806 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2807
2808 /* Display current flow control setting used on link */
2809 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2810 hw->fc.current_mode == ixgbe_fc_full)
2811 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2812 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2813 hw->fc.current_mode == ixgbe_fc_full)
2814 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2815
2816 IXGBE_CORE_UNLOCK(adapter);
2817
2818 return;
2819 } /* ixgbe_media_status */
2820
2821 /************************************************************************
2822 * ixgbe_media_change - Media Ioctl callback
2823 *
2824 * Called when the user changes speed/duplex using
2825 * media/mediopt option with ifconfig.
2826 ************************************************************************/
2827 static int
2828 ixgbe_media_change(struct ifnet *ifp)
2829 {
2830 struct adapter *adapter = ifp->if_softc;
2831 struct ifmedia *ifm = &adapter->media;
2832 struct ixgbe_hw *hw = &adapter->hw;
2833 ixgbe_link_speed speed = 0;
2834 ixgbe_link_speed link_caps = 0;
2835 bool negotiate = false;
2836 s32 err = IXGBE_NOT_IMPLEMENTED;
2837
2838 INIT_DEBUGOUT("ixgbe_media_change: begin");
2839
2840 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2841 return (EINVAL);
2842
2843 if (hw->phy.media_type == ixgbe_media_type_backplane)
2844 return (EPERM);
2845
2846 IXGBE_CORE_LOCK(adapter);
2847 /*
2848 * We don't actually need to check against the supported
2849 * media types of the adapter; ifmedia will take care of
2850 * that for us.
2851 */
2852 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2853 case IFM_AUTO:
2854 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2855 &negotiate);
2856 if (err != IXGBE_SUCCESS) {
2857 device_printf(adapter->dev, "Unable to determine "
2858 "supported advertise speeds\n");
2859 IXGBE_CORE_UNLOCK(adapter);
2860 return (ENODEV);
2861 }
2862 speed |= link_caps;
2863 break;
2864 case IFM_10G_T:
2865 case IFM_10G_LRM:
2866 case IFM_10G_LR:
2867 case IFM_10G_TWINAX:
2868 #ifndef IFM_ETH_XTYPE
2869 case IFM_10G_SR: /* KR, too */
2870 case IFM_10G_CX4: /* KX4 */
2871 #else
2872 case IFM_10G_KR:
2873 case IFM_10G_KX4:
2874 #endif
2875 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2876 break;
2877 case IFM_5000_T:
2878 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2879 break;
2880 case IFM_2500_T:
2881 case IFM_2500_KX:
2882 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2883 break;
2884 case IFM_1000_T:
2885 case IFM_1000_LX:
2886 case IFM_1000_SX:
2887 case IFM_1000_KX:
2888 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2889 break;
2890 case IFM_100_TX:
2891 speed |= IXGBE_LINK_SPEED_100_FULL;
2892 break;
2893 case IFM_10_T:
2894 speed |= IXGBE_LINK_SPEED_10_FULL;
2895 break;
2896 case IFM_NONE:
2897 break;
2898 default:
2899 goto invalid;
2900 }
2901
2902 hw->mac.autotry_restart = TRUE;
2903 hw->mac.ops.setup_link(hw, speed, TRUE);
2904 adapter->advertise = 0;
2905 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
2906 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
2907 adapter->advertise |= 1 << 2;
2908 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
2909 adapter->advertise |= 1 << 1;
2910 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
2911 adapter->advertise |= 1 << 0;
2912 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
2913 adapter->advertise |= 1 << 3;
2914 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
2915 adapter->advertise |= 1 << 4;
2916 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
2917 adapter->advertise |= 1 << 5;
2918 }
2919
2920 IXGBE_CORE_UNLOCK(adapter);
2921 return (0);
2922
2923 invalid:
2924 device_printf(adapter->dev, "Invalid media type!\n");
2925 IXGBE_CORE_UNLOCK(adapter);
2926
2927 return (EINVAL);
2928 } /* ixgbe_media_change */
2929
2930 /************************************************************************
2931 * ixgbe_set_promisc
2932 ************************************************************************/
2933 static void
2934 ixgbe_set_promisc(struct adapter *adapter)
2935 {
2936 struct ifnet *ifp = adapter->ifp;
2937 int mcnt = 0;
2938 u32 rctl;
2939 struct ether_multi *enm;
2940 struct ether_multistep step;
2941 struct ethercom *ec = &adapter->osdep.ec;
2942
2943 KASSERT(mutex_owned(&adapter->core_mtx));
2944 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2945 rctl &= (~IXGBE_FCTRL_UPE);
2946 if (ifp->if_flags & IFF_ALLMULTI)
2947 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2948 else {
2949 ETHER_LOCK(ec);
2950 ETHER_FIRST_MULTI(step, ec, enm);
2951 while (enm != NULL) {
2952 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2953 break;
2954 mcnt++;
2955 ETHER_NEXT_MULTI(step, enm);
2956 }
2957 ETHER_UNLOCK(ec);
2958 }
2959 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2960 rctl &= (~IXGBE_FCTRL_MPE);
2961 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2962
2963 if (ifp->if_flags & IFF_PROMISC) {
2964 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2965 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2966 } else if (ifp->if_flags & IFF_ALLMULTI) {
2967 rctl |= IXGBE_FCTRL_MPE;
2968 rctl &= ~IXGBE_FCTRL_UPE;
2969 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2970 }
2971 } /* ixgbe_set_promisc */
2972
2973 /************************************************************************
2974 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2975 ************************************************************************/
2976 static int
2977 ixgbe_msix_link(void *arg)
2978 {
2979 struct adapter *adapter = arg;
2980 struct ixgbe_hw *hw = &adapter->hw;
2981 u32 eicr, eicr_mask;
2982 s32 retval;
2983
2984 ++adapter->link_irq.ev_count;
2985
2986 /* Pause other interrupts */
2987 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2988
2989 /* First get the cause */
2990 /*
2991 * The specifications of 82598, 82599, X540 and X550 say EICS register
2992 * is write only. However, Linux says it is a workaround for silicon
2993 * errata to read EICS instead of EICR to get interrupt cause. It seems
2994 * there is a problem about read clear mechanism for EICR register.
2995 */
2996 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2997 /* Be sure the queue bits are not cleared */
2998 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2999 /* Clear interrupt with write */
3000 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3001
3002 /* Link status change */
3003 if (eicr & IXGBE_EICR_LSC) {
3004 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3005 softint_schedule(adapter->link_si);
3006 }
3007
3008 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3009 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3010 (eicr & IXGBE_EICR_FLOW_DIR)) {
3011 /* This is probably overkill :) */
3012 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3013 return 1;
3014 /* Disable the interrupt */
3015 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3016 softint_schedule(adapter->fdir_si);
3017 }
3018
3019 if (eicr & IXGBE_EICR_ECC) {
3020 device_printf(adapter->dev,
3021 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3022 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3023 }
3024
3025 /* Check for over temp condition */
3026 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3027 switch (adapter->hw.mac.type) {
3028 case ixgbe_mac_X550EM_a:
3029 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3030 break;
3031 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3032 IXGBE_EICR_GPI_SDP0_X550EM_a);
3033 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3034 IXGBE_EICR_GPI_SDP0_X550EM_a);
3035 retval = hw->phy.ops.check_overtemp(hw);
3036 if (retval != IXGBE_ERR_OVERTEMP)
3037 break;
3038 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3039 device_printf(adapter->dev, "System shutdown required!\n");
3040 break;
3041 default:
3042 if (!(eicr & IXGBE_EICR_TS))
3043 break;
3044 retval = hw->phy.ops.check_overtemp(hw);
3045 if (retval != IXGBE_ERR_OVERTEMP)
3046 break;
3047 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3048 device_printf(adapter->dev, "System shutdown required!\n");
3049 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3050 break;
3051 }
3052 }
3053
3054 /* Check for VF message */
3055 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3056 (eicr & IXGBE_EICR_MAILBOX))
3057 softint_schedule(adapter->mbx_si);
3058 }
3059
3060 if (ixgbe_is_sfp(hw)) {
3061 /* Pluggable optics-related interrupt */
3062 if (hw->mac.type >= ixgbe_mac_X540)
3063 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3064 else
3065 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3066
3067 if (eicr & eicr_mask) {
3068 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3069 softint_schedule(adapter->mod_si);
3070 }
3071
3072 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3073 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3074 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3075 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3076 softint_schedule(adapter->msf_si);
3077 }
3078 }
3079
3080 /* Check for fan failure */
3081 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3082 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3083 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3084 }
3085
3086 /* External PHY interrupt */
3087 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3088 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3089 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3090 softint_schedule(adapter->phy_si);
3091 }
3092
3093 /* Re-enable other interrupts */
3094 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3095 return 1;
3096 } /* ixgbe_msix_link */
3097
3098 static void
3099 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3100 {
3101
3102 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3103 itr |= itr << 16;
3104 else
3105 itr |= IXGBE_EITR_CNT_WDIS;
3106
3107 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3108 }
3109
3110
3111 /************************************************************************
3112 * ixgbe_sysctl_interrupt_rate_handler
3113 ************************************************************************/
3114 static int
3115 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3116 {
3117 struct sysctlnode node = *rnode;
3118 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3119 struct adapter *adapter = que->adapter;
3120 uint32_t reg, usec, rate;
3121 int error;
3122
3123 if (que == NULL)
3124 return 0;
3125 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3126 usec = ((reg & 0x0FF8) >> 3);
3127 if (usec > 0)
3128 rate = 500000 / usec;
3129 else
3130 rate = 0;
3131 node.sysctl_data = &rate;
3132 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3133 if (error || newp == NULL)
3134 return error;
3135 reg &= ~0xfff; /* default, no limitation */
3136 if (rate > 0 && rate < 500000) {
3137 if (rate < 1000)
3138 rate = 1000;
3139 reg |= ((4000000/rate) & 0xff8);
3140 /*
3141 * When RSC is used, ITR interval must be larger than
3142 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3143 * The minimum value is always greater than 2us on 100M
3144 * (and 10M?(not documented)), but it's not on 1G and higher.
3145 */
3146 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3147 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3148 if ((adapter->num_queues > 1)
3149 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3150 return EINVAL;
3151 }
3152 ixgbe_max_interrupt_rate = rate;
3153 } else
3154 ixgbe_max_interrupt_rate = 0;
3155 ixgbe_eitr_write(adapter, que->msix, reg);
3156
3157 return (0);
3158 } /* ixgbe_sysctl_interrupt_rate_handler */
3159
3160 const struct sysctlnode *
3161 ixgbe_sysctl_instance(struct adapter *adapter)
3162 {
3163 const char *dvname;
3164 struct sysctllog **log;
3165 int rc;
3166 const struct sysctlnode *rnode;
3167
3168 if (adapter->sysctltop != NULL)
3169 return adapter->sysctltop;
3170
3171 log = &adapter->sysctllog;
3172 dvname = device_xname(adapter->dev);
3173
3174 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3175 0, CTLTYPE_NODE, dvname,
3176 SYSCTL_DESCR("ixgbe information and settings"),
3177 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3178 goto err;
3179
3180 return rnode;
3181 err:
3182 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3183 return NULL;
3184 }
3185
3186 /************************************************************************
3187 * ixgbe_add_device_sysctls
3188 ************************************************************************/
3189 static void
3190 ixgbe_add_device_sysctls(struct adapter *adapter)
3191 {
3192 device_t dev = adapter->dev;
3193 struct ixgbe_hw *hw = &adapter->hw;
3194 struct sysctllog **log;
3195 const struct sysctlnode *rnode, *cnode;
3196
3197 log = &adapter->sysctllog;
3198
3199 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3200 aprint_error_dev(dev, "could not create sysctl root\n");
3201 return;
3202 }
3203
3204 if (sysctl_createv(log, 0, &rnode, &cnode,
3205 CTLFLAG_READWRITE, CTLTYPE_INT,
3206 "debug", SYSCTL_DESCR("Debug Info"),
3207 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3208 aprint_error_dev(dev, "could not create sysctl\n");
3209
3210 if (sysctl_createv(log, 0, &rnode, &cnode,
3211 CTLFLAG_READONLY, CTLTYPE_INT,
3212 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3213 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3214 aprint_error_dev(dev, "could not create sysctl\n");
3215
3216 if (sysctl_createv(log, 0, &rnode, &cnode,
3217 CTLFLAG_READONLY, CTLTYPE_INT,
3218 "num_queues", SYSCTL_DESCR("Number of queues"),
3219 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3220 aprint_error_dev(dev, "could not create sysctl\n");
3221
3222 /* Sysctls for all devices */
3223 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3224 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3225 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3226 CTL_EOL) != 0)
3227 aprint_error_dev(dev, "could not create sysctl\n");
3228
3229 adapter->enable_aim = ixgbe_enable_aim;
3230 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3231 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3232 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3233 aprint_error_dev(dev, "could not create sysctl\n");
3234
3235 if (sysctl_createv(log, 0, &rnode, &cnode,
3236 CTLFLAG_READWRITE, CTLTYPE_INT,
3237 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3238 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3239 CTL_EOL) != 0)
3240 aprint_error_dev(dev, "could not create sysctl\n");
3241
3242 /*
3243 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3244 * it causesflip-flopping softint/workqueue mode in one deferred
3245 * processing. Therefore, preempt_disable()/preempt_enable() are
3246 * required in ixgbe_sched_handle_que() to avoid
3247 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3248 * I think changing "que->txrx_use_workqueue" in interrupt handler
3249 * is lighter than doing preempt_disable()/preempt_enable() in every
3250 * ixgbe_sched_handle_que().
3251 */
3252 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3253 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3254 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3255 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3256 aprint_error_dev(dev, "could not create sysctl\n");
3257
3258 #ifdef IXGBE_DEBUG
3259 /* testing sysctls (for all devices) */
3260 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3261 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3262 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3263 CTL_EOL) != 0)
3264 aprint_error_dev(dev, "could not create sysctl\n");
3265
3266 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3267 CTLTYPE_STRING, "print_rss_config",
3268 SYSCTL_DESCR("Prints RSS Configuration"),
3269 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3270 CTL_EOL) != 0)
3271 aprint_error_dev(dev, "could not create sysctl\n");
3272 #endif
3273 /* for X550 series devices */
3274 if (hw->mac.type >= ixgbe_mac_X550)
3275 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3276 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3277 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3278 CTL_EOL) != 0)
3279 aprint_error_dev(dev, "could not create sysctl\n");
3280
3281 /* for WoL-capable devices */
3282 if (adapter->wol_support) {
3283 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3284 CTLTYPE_BOOL, "wol_enable",
3285 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3286 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3287 CTL_EOL) != 0)
3288 aprint_error_dev(dev, "could not create sysctl\n");
3289
3290 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3291 CTLTYPE_INT, "wufc",
3292 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3293 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3294 CTL_EOL) != 0)
3295 aprint_error_dev(dev, "could not create sysctl\n");
3296 }
3297
3298 /* for X552/X557-AT devices */
3299 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3300 const struct sysctlnode *phy_node;
3301
3302 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3303 "phy", SYSCTL_DESCR("External PHY sysctls"),
3304 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3305 aprint_error_dev(dev, "could not create sysctl\n");
3306 return;
3307 }
3308
3309 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3310 CTLTYPE_INT, "temp",
3311 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3312 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3313 CTL_EOL) != 0)
3314 aprint_error_dev(dev, "could not create sysctl\n");
3315
3316 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3317 CTLTYPE_INT, "overtemp_occurred",
3318 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3319 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3320 CTL_CREATE, CTL_EOL) != 0)
3321 aprint_error_dev(dev, "could not create sysctl\n");
3322 }
3323
3324 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3325 && (hw->phy.type == ixgbe_phy_fw))
3326 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3327 CTLTYPE_BOOL, "force_10_100_autonego",
3328 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3329 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3330 CTL_CREATE, CTL_EOL) != 0)
3331 aprint_error_dev(dev, "could not create sysctl\n");
3332
3333 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3334 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3335 CTLTYPE_INT, "eee_state",
3336 SYSCTL_DESCR("EEE Power Save State"),
3337 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3338 CTL_EOL) != 0)
3339 aprint_error_dev(dev, "could not create sysctl\n");
3340 }
3341 } /* ixgbe_add_device_sysctls */
3342
3343 /************************************************************************
3344 * ixgbe_allocate_pci_resources
3345 ************************************************************************/
3346 static int
3347 ixgbe_allocate_pci_resources(struct adapter *adapter,
3348 const struct pci_attach_args *pa)
3349 {
3350 pcireg_t memtype;
3351 device_t dev = adapter->dev;
3352 bus_addr_t addr;
3353 int flags;
3354
3355 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3356 switch (memtype) {
3357 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3358 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3359 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3360 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3361 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3362 goto map_err;
3363 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3364 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3365 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3366 }
3367 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3368 adapter->osdep.mem_size, flags,
3369 &adapter->osdep.mem_bus_space_handle) != 0) {
3370 map_err:
3371 adapter->osdep.mem_size = 0;
3372 aprint_error_dev(dev, "unable to map BAR0\n");
3373 return ENXIO;
3374 }
3375 break;
3376 default:
3377 aprint_error_dev(dev, "unexpected type on BAR0\n");
3378 return ENXIO;
3379 }
3380
3381 return (0);
3382 } /* ixgbe_allocate_pci_resources */
3383
3384 static void
3385 ixgbe_free_softint(struct adapter *adapter)
3386 {
3387 struct ix_queue *que = adapter->queues;
3388 struct tx_ring *txr = adapter->tx_rings;
3389 int i;
3390
3391 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3392 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3393 if (txr->txr_si != NULL)
3394 softint_disestablish(txr->txr_si);
3395 }
3396 if (que->que_si != NULL)
3397 softint_disestablish(que->que_si);
3398 }
3399 if (adapter->txr_wq != NULL)
3400 workqueue_destroy(adapter->txr_wq);
3401 if (adapter->txr_wq_enqueued != NULL)
3402 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3403 if (adapter->que_wq != NULL)
3404 workqueue_destroy(adapter->que_wq);
3405
3406 /* Drain the Link queue */
3407 if (adapter->link_si != NULL) {
3408 softint_disestablish(adapter->link_si);
3409 adapter->link_si = NULL;
3410 }
3411 if (adapter->mod_si != NULL) {
3412 softint_disestablish(adapter->mod_si);
3413 adapter->mod_si = NULL;
3414 }
3415 if (adapter->msf_si != NULL) {
3416 softint_disestablish(adapter->msf_si);
3417 adapter->msf_si = NULL;
3418 }
3419 if (adapter->phy_si != NULL) {
3420 softint_disestablish(adapter->phy_si);
3421 adapter->phy_si = NULL;
3422 }
3423 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3424 if (adapter->fdir_si != NULL) {
3425 softint_disestablish(adapter->fdir_si);
3426 adapter->fdir_si = NULL;
3427 }
3428 }
3429 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3430 if (adapter->mbx_si != NULL) {
3431 softint_disestablish(adapter->mbx_si);
3432 adapter->mbx_si = NULL;
3433 }
3434 }
3435 } /* ixgbe_free_softint */
3436
3437 /************************************************************************
3438 * ixgbe_detach - Device removal routine
3439 *
3440 * Called when the driver is being removed.
3441 * Stops the adapter and deallocates all the resources
3442 * that were allocated for driver operation.
3443 *
3444 * return 0 on success, positive on failure
3445 ************************************************************************/
3446 static int
3447 ixgbe_detach(device_t dev, int flags)
3448 {
3449 struct adapter *adapter = device_private(dev);
3450 struct rx_ring *rxr = adapter->rx_rings;
3451 struct tx_ring *txr = adapter->tx_rings;
3452 struct ixgbe_hw *hw = &adapter->hw;
3453 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3454 u32 ctrl_ext;
3455
3456 INIT_DEBUGOUT("ixgbe_detach: begin");
3457 if (adapter->osdep.attached == false)
3458 return 0;
3459
3460 if (ixgbe_pci_iov_detach(dev) != 0) {
3461 device_printf(dev, "SR-IOV in use; detach first.\n");
3462 return (EBUSY);
3463 }
3464
3465 /* Stop the interface. Callouts are stopped in it. */
3466 ixgbe_ifstop(adapter->ifp, 1);
3467 #if NVLAN > 0
3468 /* Make sure VLANs are not using driver */
3469 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3470 ; /* nothing to do: no VLANs */
3471 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
3472 vlan_ifdetach(adapter->ifp);
3473 else {
3474 aprint_error_dev(dev, "VLANs in use, detach first\n");
3475 return (EBUSY);
3476 }
3477 #endif
3478
3479 pmf_device_deregister(dev);
3480
3481 ether_ifdetach(adapter->ifp);
3482 /* Stop the adapter */
3483 IXGBE_CORE_LOCK(adapter);
3484 ixgbe_setup_low_power_mode(adapter);
3485 IXGBE_CORE_UNLOCK(adapter);
3486
3487 ixgbe_free_softint(adapter);
3488
3489 /* let hardware know driver is unloading */
3490 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3491 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3492 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3493
3494 callout_halt(&adapter->timer, NULL);
3495
3496 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3497 netmap_detach(adapter->ifp);
3498
3499 ixgbe_free_pci_resources(adapter);
3500 #if 0 /* XXX the NetBSD port is probably missing something here */
3501 bus_generic_detach(dev);
3502 #endif
3503 if_detach(adapter->ifp);
3504 if_percpuq_destroy(adapter->ipq);
3505
3506 sysctl_teardown(&adapter->sysctllog);
3507 evcnt_detach(&adapter->efbig_tx_dma_setup);
3508 evcnt_detach(&adapter->mbuf_defrag_failed);
3509 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3510 evcnt_detach(&adapter->einval_tx_dma_setup);
3511 evcnt_detach(&adapter->other_tx_dma_setup);
3512 evcnt_detach(&adapter->eagain_tx_dma_setup);
3513 evcnt_detach(&adapter->enomem_tx_dma_setup);
3514 evcnt_detach(&adapter->watchdog_events);
3515 evcnt_detach(&adapter->tso_err);
3516 evcnt_detach(&adapter->link_irq);
3517 evcnt_detach(&adapter->link_sicount);
3518 evcnt_detach(&adapter->mod_sicount);
3519 evcnt_detach(&adapter->msf_sicount);
3520 evcnt_detach(&adapter->phy_sicount);
3521
3522 txr = adapter->tx_rings;
3523 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3524 evcnt_detach(&adapter->queues[i].irqs);
3525 evcnt_detach(&adapter->queues[i].handleq);
3526 evcnt_detach(&adapter->queues[i].req);
3527 evcnt_detach(&txr->no_desc_avail);
3528 evcnt_detach(&txr->total_packets);
3529 evcnt_detach(&txr->tso_tx);
3530 #ifndef IXGBE_LEGACY_TX
3531 evcnt_detach(&txr->pcq_drops);
3532 #endif
3533
3534 if (i < __arraycount(stats->mpc)) {
3535 evcnt_detach(&stats->mpc[i]);
3536 if (hw->mac.type == ixgbe_mac_82598EB)
3537 evcnt_detach(&stats->rnbc[i]);
3538 }
3539 if (i < __arraycount(stats->pxontxc)) {
3540 evcnt_detach(&stats->pxontxc[i]);
3541 evcnt_detach(&stats->pxonrxc[i]);
3542 evcnt_detach(&stats->pxofftxc[i]);
3543 evcnt_detach(&stats->pxoffrxc[i]);
3544 if (hw->mac.type >= ixgbe_mac_82599EB)
3545 evcnt_detach(&stats->pxon2offc[i]);
3546 }
3547 if (i < __arraycount(stats->qprc)) {
3548 evcnt_detach(&stats->qprc[i]);
3549 evcnt_detach(&stats->qptc[i]);
3550 evcnt_detach(&stats->qbrc[i]);
3551 evcnt_detach(&stats->qbtc[i]);
3552 if (hw->mac.type >= ixgbe_mac_82599EB)
3553 evcnt_detach(&stats->qprdc[i]);
3554 }
3555
3556 evcnt_detach(&rxr->rx_packets);
3557 evcnt_detach(&rxr->rx_bytes);
3558 evcnt_detach(&rxr->rx_copies);
3559 evcnt_detach(&rxr->no_jmbuf);
3560 evcnt_detach(&rxr->rx_discarded);
3561 }
3562 evcnt_detach(&stats->ipcs);
3563 evcnt_detach(&stats->l4cs);
3564 evcnt_detach(&stats->ipcs_bad);
3565 evcnt_detach(&stats->l4cs_bad);
3566 evcnt_detach(&stats->intzero);
3567 evcnt_detach(&stats->legint);
3568 evcnt_detach(&stats->crcerrs);
3569 evcnt_detach(&stats->illerrc);
3570 evcnt_detach(&stats->errbc);
3571 evcnt_detach(&stats->mspdc);
3572 if (hw->mac.type >= ixgbe_mac_X550)
3573 evcnt_detach(&stats->mbsdc);
3574 evcnt_detach(&stats->mpctotal);
3575 evcnt_detach(&stats->mlfc);
3576 evcnt_detach(&stats->mrfc);
3577 evcnt_detach(&stats->rlec);
3578 evcnt_detach(&stats->lxontxc);
3579 evcnt_detach(&stats->lxonrxc);
3580 evcnt_detach(&stats->lxofftxc);
3581 evcnt_detach(&stats->lxoffrxc);
3582
3583 /* Packet Reception Stats */
3584 evcnt_detach(&stats->tor);
3585 evcnt_detach(&stats->gorc);
3586 evcnt_detach(&stats->tpr);
3587 evcnt_detach(&stats->gprc);
3588 evcnt_detach(&stats->mprc);
3589 evcnt_detach(&stats->bprc);
3590 evcnt_detach(&stats->prc64);
3591 evcnt_detach(&stats->prc127);
3592 evcnt_detach(&stats->prc255);
3593 evcnt_detach(&stats->prc511);
3594 evcnt_detach(&stats->prc1023);
3595 evcnt_detach(&stats->prc1522);
3596 evcnt_detach(&stats->ruc);
3597 evcnt_detach(&stats->rfc);
3598 evcnt_detach(&stats->roc);
3599 evcnt_detach(&stats->rjc);
3600 evcnt_detach(&stats->mngprc);
3601 evcnt_detach(&stats->mngpdc);
3602 evcnt_detach(&stats->xec);
3603
3604 /* Packet Transmission Stats */
3605 evcnt_detach(&stats->gotc);
3606 evcnt_detach(&stats->tpt);
3607 evcnt_detach(&stats->gptc);
3608 evcnt_detach(&stats->bptc);
3609 evcnt_detach(&stats->mptc);
3610 evcnt_detach(&stats->mngptc);
3611 evcnt_detach(&stats->ptc64);
3612 evcnt_detach(&stats->ptc127);
3613 evcnt_detach(&stats->ptc255);
3614 evcnt_detach(&stats->ptc511);
3615 evcnt_detach(&stats->ptc1023);
3616 evcnt_detach(&stats->ptc1522);
3617
3618 ixgbe_free_transmit_structures(adapter);
3619 ixgbe_free_receive_structures(adapter);
3620 for (int i = 0; i < adapter->num_queues; i++) {
3621 struct ix_queue * que = &adapter->queues[i];
3622 mutex_destroy(&que->dc_mtx);
3623 }
3624 free(adapter->queues, M_DEVBUF);
3625 free(adapter->mta, M_DEVBUF);
3626
3627 IXGBE_CORE_LOCK_DESTROY(adapter);
3628
3629 return (0);
3630 } /* ixgbe_detach */
3631
3632 /************************************************************************
3633 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3634 *
3635 * Prepare the adapter/port for LPLU and/or WoL
3636 ************************************************************************/
3637 static int
3638 ixgbe_setup_low_power_mode(struct adapter *adapter)
3639 {
3640 struct ixgbe_hw *hw = &adapter->hw;
3641 device_t dev = adapter->dev;
3642 s32 error = 0;
3643
3644 KASSERT(mutex_owned(&adapter->core_mtx));
3645
3646 /* Limit power management flow to X550EM baseT */
3647 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3648 hw->phy.ops.enter_lplu) {
3649 /* X550EM baseT adapters need a special LPLU flow */
3650 hw->phy.reset_disable = true;
3651 ixgbe_stop(adapter);
3652 error = hw->phy.ops.enter_lplu(hw);
3653 if (error)
3654 device_printf(dev,
3655 "Error entering LPLU: %d\n", error);
3656 hw->phy.reset_disable = false;
3657 } else {
3658 /* Just stop for other adapters */
3659 ixgbe_stop(adapter);
3660 }
3661
3662 if (!hw->wol_enabled) {
3663 ixgbe_set_phy_power(hw, FALSE);
3664 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3665 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3666 } else {
3667 /* Turn off support for APM wakeup. (Using ACPI instead) */
3668 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3669 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3670
3671 /*
3672 * Clear Wake Up Status register to prevent any previous wakeup
3673 * events from waking us up immediately after we suspend.
3674 */
3675 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3676
3677 /*
3678 * Program the Wakeup Filter Control register with user filter
3679 * settings
3680 */
3681 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3682
3683 /* Enable wakeups and power management in Wakeup Control */
3684 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3685 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3686
3687 }
3688
3689 return error;
3690 } /* ixgbe_setup_low_power_mode */
3691
3692 /************************************************************************
3693 * ixgbe_shutdown - Shutdown entry point
3694 ************************************************************************/
3695 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3696 static int
3697 ixgbe_shutdown(device_t dev)
3698 {
3699 struct adapter *adapter = device_private(dev);
3700 int error = 0;
3701
3702 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3703
3704 IXGBE_CORE_LOCK(adapter);
3705 error = ixgbe_setup_low_power_mode(adapter);
3706 IXGBE_CORE_UNLOCK(adapter);
3707
3708 return (error);
3709 } /* ixgbe_shutdown */
3710 #endif
3711
3712 /************************************************************************
3713 * ixgbe_suspend
3714 *
3715 * From D0 to D3
3716 ************************************************************************/
3717 static bool
3718 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3719 {
3720 struct adapter *adapter = device_private(dev);
3721 int error = 0;
3722
3723 INIT_DEBUGOUT("ixgbe_suspend: begin");
3724
3725 IXGBE_CORE_LOCK(adapter);
3726
3727 error = ixgbe_setup_low_power_mode(adapter);
3728
3729 IXGBE_CORE_UNLOCK(adapter);
3730
3731 return (error);
3732 } /* ixgbe_suspend */
3733
3734 /************************************************************************
3735 * ixgbe_resume
3736 *
3737 * From D3 to D0
3738 ************************************************************************/
3739 static bool
3740 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3741 {
3742 struct adapter *adapter = device_private(dev);
3743 struct ifnet *ifp = adapter->ifp;
3744 struct ixgbe_hw *hw = &adapter->hw;
3745 u32 wus;
3746
3747 INIT_DEBUGOUT("ixgbe_resume: begin");
3748
3749 IXGBE_CORE_LOCK(adapter);
3750
3751 /* Read & clear WUS register */
3752 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3753 if (wus)
3754 device_printf(dev, "Woken up by (WUS): %#010x\n",
3755 IXGBE_READ_REG(hw, IXGBE_WUS));
3756 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3757 /* And clear WUFC until next low-power transition */
3758 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3759
3760 /*
3761 * Required after D3->D0 transition;
3762 * will re-advertise all previous advertised speeds
3763 */
3764 if (ifp->if_flags & IFF_UP)
3765 ixgbe_init_locked(adapter);
3766
3767 IXGBE_CORE_UNLOCK(adapter);
3768
3769 return true;
3770 } /* ixgbe_resume */
3771
3772 /*
3773 * Set the various hardware offload abilities.
3774 *
3775 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3776 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3777 * mbuf offload flags the driver will understand.
3778 */
3779 static void
3780 ixgbe_set_if_hwassist(struct adapter *adapter)
3781 {
3782 /* XXX */
3783 }
3784
3785 /************************************************************************
3786 * ixgbe_init_locked - Init entry point
3787 *
3788 * Used in two ways: It is used by the stack as an init
3789 * entry point in network interface structure. It is also
3790 * used by the driver as a hw/sw initialization routine to
3791 * get to a consistent state.
3792 *
3793 * return 0 on success, positive on failure
3794 ************************************************************************/
3795 static void
3796 ixgbe_init_locked(struct adapter *adapter)
3797 {
3798 struct ifnet *ifp = adapter->ifp;
3799 device_t dev = adapter->dev;
3800 struct ixgbe_hw *hw = &adapter->hw;
3801 struct ix_queue *que;
3802 struct tx_ring *txr;
3803 struct rx_ring *rxr;
3804 u32 txdctl, mhadd;
3805 u32 rxdctl, rxctrl;
3806 u32 ctrl_ext;
3807 int i, j, err;
3808
3809 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3810
3811 KASSERT(mutex_owned(&adapter->core_mtx));
3812 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3813
3814 hw->adapter_stopped = FALSE;
3815 ixgbe_stop_adapter(hw);
3816 callout_stop(&adapter->timer);
3817 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3818 que->disabled_count = 0;
3819
3820 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3821 adapter->max_frame_size =
3822 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3823
3824 /* Queue indices may change with IOV mode */
3825 ixgbe_align_all_queue_indices(adapter);
3826
3827 /* reprogram the RAR[0] in case user changed it. */
3828 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3829
3830 /* Get the latest mac address, User can use a LAA */
3831 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3832 IXGBE_ETH_LENGTH_OF_ADDRESS);
3833 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3834 hw->addr_ctrl.rar_used_count = 1;
3835
3836 /* Set hardware offload abilities from ifnet flags */
3837 ixgbe_set_if_hwassist(adapter);
3838
3839 /* Prepare transmit descriptors and buffers */
3840 if (ixgbe_setup_transmit_structures(adapter)) {
3841 device_printf(dev, "Could not setup transmit structures\n");
3842 ixgbe_stop(adapter);
3843 return;
3844 }
3845
3846 ixgbe_init_hw(hw);
3847
3848 ixgbe_initialize_iov(adapter);
3849
3850 ixgbe_initialize_transmit_units(adapter);
3851
3852 /* Setup Multicast table */
3853 ixgbe_set_multi(adapter);
3854
3855 /* Determine the correct mbuf pool, based on frame size */
3856 if (adapter->max_frame_size <= MCLBYTES)
3857 adapter->rx_mbuf_sz = MCLBYTES;
3858 else
3859 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3860
3861 /* Prepare receive descriptors and buffers */
3862 if (ixgbe_setup_receive_structures(adapter)) {
3863 device_printf(dev, "Could not setup receive structures\n");
3864 ixgbe_stop(adapter);
3865 return;
3866 }
3867
3868 /* Configure RX settings */
3869 ixgbe_initialize_receive_units(adapter);
3870
3871 /* Enable SDP & MSI-X interrupts based on adapter */
3872 ixgbe_config_gpie(adapter);
3873
3874 /* Set MTU size */
3875 if (ifp->if_mtu > ETHERMTU) {
3876 /* aka IXGBE_MAXFRS on 82599 and newer */
3877 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3878 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3879 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
3880 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3881 }
3882
3883 /* Now enable all the queues */
3884 for (i = 0; i < adapter->num_queues; i++) {
3885 txr = &adapter->tx_rings[i];
3886 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
3887 txdctl |= IXGBE_TXDCTL_ENABLE;
3888 /* Set WTHRESH to 8, burst writeback */
3889 txdctl |= (8 << 16);
3890 /*
3891 * When the internal queue falls below PTHRESH (32),
3892 * start prefetching as long as there are at least
3893 * HTHRESH (1) buffers ready. The values are taken
3894 * from the Intel linux driver 3.8.21.
3895 * Prefetching enables tx line rate even with 1 queue.
3896 */
3897 txdctl |= (32 << 0) | (1 << 8);
3898 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
3899 }
3900
3901 for (i = 0; i < adapter->num_queues; i++) {
3902 rxr = &adapter->rx_rings[i];
3903 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3904 if (hw->mac.type == ixgbe_mac_82598EB) {
3905 /*
3906 * PTHRESH = 21
3907 * HTHRESH = 4
3908 * WTHRESH = 8
3909 */
3910 rxdctl &= ~0x3FFFFF;
3911 rxdctl |= 0x080420;
3912 }
3913 rxdctl |= IXGBE_RXDCTL_ENABLE;
3914 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
3915 for (j = 0; j < 10; j++) {
3916 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
3917 IXGBE_RXDCTL_ENABLE)
3918 break;
3919 else
3920 msec_delay(1);
3921 }
3922 wmb();
3923
3924 /*
3925 * In netmap mode, we must preserve the buffers made
3926 * available to userspace before the if_init()
3927 * (this is true by default on the TX side, because
3928 * init makes all buffers available to userspace).
3929 *
3930 * netmap_reset() and the device specific routines
3931 * (e.g. ixgbe_setup_receive_rings()) map these
3932 * buffers at the end of the NIC ring, so here we
3933 * must set the RDT (tail) register to make sure
3934 * they are not overwritten.
3935 *
3936 * In this driver the NIC ring starts at RDH = 0,
3937 * RDT points to the last slot available for reception (?),
3938 * so RDT = num_rx_desc - 1 means the whole ring is available.
3939 */
3940 #ifdef DEV_NETMAP
3941 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3942 (ifp->if_capenable & IFCAP_NETMAP)) {
3943 struct netmap_adapter *na = NA(adapter->ifp);
3944 struct netmap_kring *kring = &na->rx_rings[i];
3945 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3946
3947 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
3948 } else
3949 #endif /* DEV_NETMAP */
3950 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
3951 adapter->num_rx_desc - 1);
3952 }
3953
3954 /* Enable Receive engine */
3955 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3956 if (hw->mac.type == ixgbe_mac_82598EB)
3957 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3958 rxctrl |= IXGBE_RXCTRL_RXEN;
3959 ixgbe_enable_rx_dma(hw, rxctrl);
3960
3961 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3962
3963 /* Set up MSI/MSI-X routing */
3964 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3965 ixgbe_configure_ivars(adapter);
3966 /* Set up auto-mask */
3967 if (hw->mac.type == ixgbe_mac_82598EB)
3968 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3969 else {
3970 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3971 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3972 }
3973 } else { /* Simple settings for Legacy/MSI */
3974 ixgbe_set_ivar(adapter, 0, 0, 0);
3975 ixgbe_set_ivar(adapter, 0, 0, 1);
3976 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3977 }
3978
3979 ixgbe_init_fdir(adapter);
3980
3981 /*
3982 * Check on any SFP devices that
3983 * need to be kick-started
3984 */
3985 if (hw->phy.type == ixgbe_phy_none) {
3986 err = hw->phy.ops.identify(hw);
3987 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3988 device_printf(dev,
3989 "Unsupported SFP+ module type was detected.\n");
3990 return;
3991 }
3992 }
3993
3994 /* Set moderation on the Link interrupt */
3995 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
3996
3997 /* Enable power to the phy. */
3998 ixgbe_set_phy_power(hw, TRUE);
3999
4000 /* Config/Enable Link */
4001 ixgbe_config_link(adapter);
4002
4003 /* Hardware Packet Buffer & Flow Control setup */
4004 ixgbe_config_delay_values(adapter);
4005
4006 /* Initialize the FC settings */
4007 ixgbe_start_hw(hw);
4008
4009 /* Set up VLAN support and filter */
4010 ixgbe_setup_vlan_hw_support(adapter);
4011
4012 /* Setup DMA Coalescing */
4013 ixgbe_config_dmac(adapter);
4014
4015 /* And now turn on interrupts */
4016 ixgbe_enable_intr(adapter);
4017
4018 /* Enable the use of the MBX by the VF's */
4019 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4020 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4021 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4022 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4023 }
4024
4025 /* Update saved flags. See ixgbe_ifflags_cb() */
4026 adapter->if_flags = ifp->if_flags;
4027
4028 /* Now inform the stack we're ready */
4029 ifp->if_flags |= IFF_RUNNING;
4030
4031 return;
4032 } /* ixgbe_init_locked */
4033
4034 /************************************************************************
4035 * ixgbe_init
4036 ************************************************************************/
4037 static int
4038 ixgbe_init(struct ifnet *ifp)
4039 {
4040 struct adapter *adapter = ifp->if_softc;
4041
4042 IXGBE_CORE_LOCK(adapter);
4043 ixgbe_init_locked(adapter);
4044 IXGBE_CORE_UNLOCK(adapter);
4045
4046 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4047 } /* ixgbe_init */
4048
4049 /************************************************************************
4050 * ixgbe_set_ivar
4051 *
4052 * Setup the correct IVAR register for a particular MSI-X interrupt
4053 * (yes this is all very magic and confusing :)
4054 * - entry is the register array entry
4055 * - vector is the MSI-X vector for this queue
4056 * - type is RX/TX/MISC
4057 ************************************************************************/
4058 static void
4059 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4060 {
4061 struct ixgbe_hw *hw = &adapter->hw;
4062 u32 ivar, index;
4063
4064 vector |= IXGBE_IVAR_ALLOC_VAL;
4065
4066 switch (hw->mac.type) {
4067 case ixgbe_mac_82598EB:
4068 if (type == -1)
4069 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4070 else
4071 entry += (type * 64);
4072 index = (entry >> 2) & 0x1F;
4073 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4074 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4075 ivar |= (vector << (8 * (entry & 0x3)));
4076 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4077 break;
4078 case ixgbe_mac_82599EB:
4079 case ixgbe_mac_X540:
4080 case ixgbe_mac_X550:
4081 case ixgbe_mac_X550EM_x:
4082 case ixgbe_mac_X550EM_a:
4083 if (type == -1) { /* MISC IVAR */
4084 index = (entry & 1) * 8;
4085 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4086 ivar &= ~(0xFF << index);
4087 ivar |= (vector << index);
4088 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4089 } else { /* RX/TX IVARS */
4090 index = (16 * (entry & 1)) + (8 * type);
4091 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4092 ivar &= ~(0xFF << index);
4093 ivar |= (vector << index);
4094 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4095 }
4096 break;
4097 default:
4098 break;
4099 }
4100 } /* ixgbe_set_ivar */
4101
4102 /************************************************************************
4103 * ixgbe_configure_ivars
4104 ************************************************************************/
4105 static void
4106 ixgbe_configure_ivars(struct adapter *adapter)
4107 {
4108 struct ix_queue *que = adapter->queues;
4109 u32 newitr;
4110
4111 if (ixgbe_max_interrupt_rate > 0)
4112 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4113 else {
4114 /*
4115 * Disable DMA coalescing if interrupt moderation is
4116 * disabled.
4117 */
4118 adapter->dmac = 0;
4119 newitr = 0;
4120 }
4121
4122 for (int i = 0; i < adapter->num_queues; i++, que++) {
4123 struct rx_ring *rxr = &adapter->rx_rings[i];
4124 struct tx_ring *txr = &adapter->tx_rings[i];
4125 /* First the RX queue entry */
4126 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4127 /* ... and the TX */
4128 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4129 /* Set an Initial EITR value */
4130 ixgbe_eitr_write(adapter, que->msix, newitr);
4131 /*
4132 * To eliminate influence of the previous state.
4133 * At this point, Tx/Rx interrupt handler
4134 * (ixgbe_msix_que()) cannot be called, so both
4135 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
4136 */
4137 que->eitr_setting = 0;
4138 }
4139
4140 /* For the Link interrupt */
4141 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4142 } /* ixgbe_configure_ivars */
4143
4144 /************************************************************************
4145 * ixgbe_config_gpie
4146 ************************************************************************/
4147 static void
4148 ixgbe_config_gpie(struct adapter *adapter)
4149 {
4150 struct ixgbe_hw *hw = &adapter->hw;
4151 u32 gpie;
4152
4153 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4154
4155 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4156 /* Enable Enhanced MSI-X mode */
4157 gpie |= IXGBE_GPIE_MSIX_MODE
4158 | IXGBE_GPIE_EIAME
4159 | IXGBE_GPIE_PBA_SUPPORT
4160 | IXGBE_GPIE_OCD;
4161 }
4162
4163 /* Fan Failure Interrupt */
4164 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4165 gpie |= IXGBE_SDP1_GPIEN;
4166
4167 /* Thermal Sensor Interrupt */
4168 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4169 gpie |= IXGBE_SDP0_GPIEN_X540;
4170
4171 /* Link detection */
4172 switch (hw->mac.type) {
4173 case ixgbe_mac_82599EB:
4174 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4175 break;
4176 case ixgbe_mac_X550EM_x:
4177 case ixgbe_mac_X550EM_a:
4178 gpie |= IXGBE_SDP0_GPIEN_X540;
4179 break;
4180 default:
4181 break;
4182 }
4183
4184 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4185
4186 } /* ixgbe_config_gpie */
4187
4188 /************************************************************************
4189 * ixgbe_config_delay_values
4190 *
4191 * Requires adapter->max_frame_size to be set.
4192 ************************************************************************/
4193 static void
4194 ixgbe_config_delay_values(struct adapter *adapter)
4195 {
4196 struct ixgbe_hw *hw = &adapter->hw;
4197 u32 rxpb, frame, size, tmp;
4198
4199 frame = adapter->max_frame_size;
4200
4201 /* Calculate High Water */
4202 switch (hw->mac.type) {
4203 case ixgbe_mac_X540:
4204 case ixgbe_mac_X550:
4205 case ixgbe_mac_X550EM_x:
4206 case ixgbe_mac_X550EM_a:
4207 tmp = IXGBE_DV_X540(frame, frame);
4208 break;
4209 default:
4210 tmp = IXGBE_DV(frame, frame);
4211 break;
4212 }
4213 size = IXGBE_BT2KB(tmp);
4214 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4215 hw->fc.high_water[0] = rxpb - size;
4216
4217 /* Now calculate Low Water */
4218 switch (hw->mac.type) {
4219 case ixgbe_mac_X540:
4220 case ixgbe_mac_X550:
4221 case ixgbe_mac_X550EM_x:
4222 case ixgbe_mac_X550EM_a:
4223 tmp = IXGBE_LOW_DV_X540(frame);
4224 break;
4225 default:
4226 tmp = IXGBE_LOW_DV(frame);
4227 break;
4228 }
4229 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4230
4231 hw->fc.pause_time = IXGBE_FC_PAUSE;
4232 hw->fc.send_xon = TRUE;
4233 } /* ixgbe_config_delay_values */
4234
4235 /************************************************************************
4236 * ixgbe_set_multi - Multicast Update
4237 *
4238 * Called whenever multicast address list is updated.
4239 ************************************************************************/
4240 static void
4241 ixgbe_set_multi(struct adapter *adapter)
4242 {
4243 struct ixgbe_mc_addr *mta;
4244 struct ifnet *ifp = adapter->ifp;
4245 u8 *update_ptr;
4246 int mcnt = 0;
4247 u32 fctrl;
4248 struct ethercom *ec = &adapter->osdep.ec;
4249 struct ether_multi *enm;
4250 struct ether_multistep step;
4251
4252 KASSERT(mutex_owned(&adapter->core_mtx));
4253 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
4254
4255 mta = adapter->mta;
4256 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4257
4258 ifp->if_flags &= ~IFF_ALLMULTI;
4259 ETHER_LOCK(ec);
4260 ETHER_FIRST_MULTI(step, ec, enm);
4261 while (enm != NULL) {
4262 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4263 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4264 ETHER_ADDR_LEN) != 0)) {
4265 ifp->if_flags |= IFF_ALLMULTI;
4266 break;
4267 }
4268 bcopy(enm->enm_addrlo,
4269 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4270 mta[mcnt].vmdq = adapter->pool;
4271 mcnt++;
4272 ETHER_NEXT_MULTI(step, enm);
4273 }
4274 ETHER_UNLOCK(ec);
4275
4276 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4277 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4278 if (ifp->if_flags & IFF_PROMISC)
4279 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4280 else if (ifp->if_flags & IFF_ALLMULTI) {
4281 fctrl |= IXGBE_FCTRL_MPE;
4282 }
4283
4284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4285
4286 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4287 update_ptr = (u8 *)mta;
4288 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4289 ixgbe_mc_array_itr, TRUE);
4290 }
4291
4292 } /* ixgbe_set_multi */
4293
4294 /************************************************************************
4295 * ixgbe_mc_array_itr
4296 *
4297 * An iterator function needed by the multicast shared code.
4298 * It feeds the shared code routine the addresses in the
4299 * array of ixgbe_set_multi() one by one.
4300 ************************************************************************/
4301 static u8 *
4302 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4303 {
4304 struct ixgbe_mc_addr *mta;
4305
4306 mta = (struct ixgbe_mc_addr *)*update_ptr;
4307 *vmdq = mta->vmdq;
4308
4309 *update_ptr = (u8*)(mta + 1);
4310
4311 return (mta->addr);
4312 } /* ixgbe_mc_array_itr */
4313
4314 /************************************************************************
4315 * ixgbe_local_timer - Timer routine
4316 *
4317 * Checks for link status, updates statistics,
4318 * and runs the watchdog check.
4319 ************************************************************************/
4320 static void
4321 ixgbe_local_timer(void *arg)
4322 {
4323 struct adapter *adapter = arg;
4324
4325 IXGBE_CORE_LOCK(adapter);
4326 ixgbe_local_timer1(adapter);
4327 IXGBE_CORE_UNLOCK(adapter);
4328 }
4329
/*
 * Periodic housekeeping, called from ixgbe_local_timer() with the core
 * lock held: refreshes link state and statistics, aggregates per-queue
 * TX error counters into the adapter-wide event counters, and runs the
 * TX watchdog (reinitializing the adapter if every queue is hung).
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters by summing the per-queue values */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Rearm ourselves for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	/* Full reinit; this also restarts the callout */
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4434
4435 /************************************************************************
4436 * ixgbe_sfp_probe
4437 *
4438 * Determine if a port had optics inserted.
4439 ************************************************************************/
4440 static bool
4441 ixgbe_sfp_probe(struct adapter *adapter)
4442 {
4443 struct ixgbe_hw *hw = &adapter->hw;
4444 device_t dev = adapter->dev;
4445 bool result = FALSE;
4446
4447 if ((hw->phy.type == ixgbe_phy_nl) &&
4448 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4449 s32 ret = hw->phy.ops.identify_sfp(hw);
4450 if (ret)
4451 goto out;
4452 ret = hw->phy.ops.reset(hw);
4453 adapter->sfp_probe = FALSE;
4454 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4455 device_printf(dev,"Unsupported SFP+ module detected!");
4456 device_printf(dev,
4457 "Reload driver with supported module.\n");
4458 goto out;
4459 } else
4460 device_printf(dev, "SFP+ module detected!\n");
4461 /* We now have supported optics */
4462 result = TRUE;
4463 }
4464 out:
4465
4466 return (result);
4467 } /* ixgbe_sfp_probe */
4468
4469 /************************************************************************
4470 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4471 ************************************************************************/
4472 static void
4473 ixgbe_handle_mod(void *context)
4474 {
4475 struct adapter *adapter = context;
4476 struct ixgbe_hw *hw = &adapter->hw;
4477 device_t dev = adapter->dev;
4478 u32 err, cage_full = 0;
4479
4480 ++adapter->mod_sicount.ev_count;
4481 if (adapter->hw.need_crosstalk_fix) {
4482 switch (hw->mac.type) {
4483 case ixgbe_mac_82599EB:
4484 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4485 IXGBE_ESDP_SDP2;
4486 break;
4487 case ixgbe_mac_X550EM_x:
4488 case ixgbe_mac_X550EM_a:
4489 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4490 IXGBE_ESDP_SDP0;
4491 break;
4492 default:
4493 break;
4494 }
4495
4496 if (!cage_full)
4497 return;
4498 }
4499
4500 err = hw->phy.ops.identify_sfp(hw);
4501 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4502 device_printf(dev,
4503 "Unsupported SFP+ module type was detected.\n");
4504 return;
4505 }
4506
4507 if (hw->mac.type == ixgbe_mac_82598EB)
4508 err = hw->phy.ops.reset(hw);
4509 else
4510 err = hw->mac.ops.setup_sfp(hw);
4511
4512 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4513 device_printf(dev,
4514 "Setup failure - unsupported SFP+ module type.\n");
4515 return;
4516 }
4517 softint_schedule(adapter->msf_si);
4518 } /* ixgbe_handle_mod */
4519
4520
4521 /************************************************************************
4522 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4523 ************************************************************************/
4524 static void
4525 ixgbe_handle_msf(void *context)
4526 {
4527 struct adapter *adapter = context;
4528 struct ixgbe_hw *hw = &adapter->hw;
4529 u32 autoneg;
4530 bool negotiate;
4531
4532 ++adapter->msf_sicount.ev_count;
4533 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4534 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4535
4536 autoneg = hw->phy.autoneg_advertised;
4537 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4538 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4539 else
4540 negotiate = 0;
4541 if (hw->mac.ops.setup_link)
4542 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4543
4544 /* Adjust media types shown in ifconfig */
4545 ifmedia_removeall(&adapter->media);
4546 ixgbe_add_media_types(adapter);
4547 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4548 } /* ixgbe_handle_msf */
4549
4550 /************************************************************************
4551 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4552 ************************************************************************/
4553 static void
4554 ixgbe_handle_phy(void *context)
4555 {
4556 struct adapter *adapter = context;
4557 struct ixgbe_hw *hw = &adapter->hw;
4558 int error;
4559
4560 ++adapter->phy_sicount.ev_count;
4561 error = hw->phy.ops.handle_lasi(hw);
4562 if (error == IXGBE_ERR_OVERTEMP)
4563 device_printf(adapter->dev,
4564 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4565 " PHY will downshift to lower power state!\n");
4566 else if (error)
4567 device_printf(adapter->dev,
4568 "Error handling LASI interrupt: %d\n", error);
4569 } /* ixgbe_handle_phy */
4570
4571 static void
4572 ixgbe_ifstop(struct ifnet *ifp, int disable)
4573 {
4574 struct adapter *adapter = ifp->if_softc;
4575
4576 IXGBE_CORE_LOCK(adapter);
4577 ixgbe_stop(adapter);
4578 IXGBE_CORE_UNLOCK(adapter);
4579 }
4580
4581 /************************************************************************
4582 * ixgbe_stop - Stop the hardware
4583 *
4584 * Disables all traffic on the adapter by issuing a
4585 * global reset on the MAC and deallocates TX/RX buffers.
4586 ************************************************************************/
4587 static void
4588 ixgbe_stop(void *arg)
4589 {
4590 struct ifnet *ifp;
4591 struct adapter *adapter = arg;
4592 struct ixgbe_hw *hw = &adapter->hw;
4593
4594 ifp = adapter->ifp;
4595
4596 KASSERT(mutex_owned(&adapter->core_mtx));
4597
4598 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4599 ixgbe_disable_intr(adapter);
4600 callout_stop(&adapter->timer);
4601
4602 /* Let the stack know...*/
4603 ifp->if_flags &= ~IFF_RUNNING;
4604
4605 ixgbe_reset_hw(hw);
4606 hw->adapter_stopped = FALSE;
4607 ixgbe_stop_adapter(hw);
4608 if (hw->mac.type == ixgbe_mac_82599EB)
4609 ixgbe_stop_mac_link_on_d3_82599(hw);
4610 /* Turn off the laser - noop with no optics */
4611 ixgbe_disable_tx_laser(hw);
4612
4613 /* Update the stack */
4614 adapter->link_up = FALSE;
4615 ixgbe_update_link_status(adapter);
4616
4617 /* reprogram the RAR[0] in case user changed it. */
4618 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4619
4620 return;
4621 } /* ixgbe_stop */
4622
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *
 * Must be called with the core lock held (asserted below).  Acts only
 * on transitions of adapter->link_active, so repeated calls with an
 * unchanged state are no-ops.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				/* Translate the cached link speed for the log */
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued TX/RX work */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
4711
4712 /************************************************************************
4713 * ixgbe_config_dmac - Configure DMA Coalescing
4714 ************************************************************************/
4715 static void
4716 ixgbe_config_dmac(struct adapter *adapter)
4717 {
4718 struct ixgbe_hw *hw = &adapter->hw;
4719 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4720
4721 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4722 return;
4723
4724 if (dcfg->watchdog_timer ^ adapter->dmac ||
4725 dcfg->link_speed ^ adapter->link_speed) {
4726 dcfg->watchdog_timer = adapter->dmac;
4727 dcfg->fcoe_en = false;
4728 dcfg->link_speed = adapter->link_speed;
4729 dcfg->num_tcs = 1;
4730
4731 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4732 dcfg->watchdog_timer, dcfg->link_speed);
4733
4734 hw->mac.ops.dmac_config(hw);
4735 }
4736 } /* ixgbe_config_dmac */
4737
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Builds and programs the interrupt-enable mask (EIMS) with the
 *   per-MAC causes, configures MSI-X auto-clear (EIAC), and finally
 *   enables every queue vector.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all non-queue causes; queues are enabled separately below */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		/* External PHY (x550em_ext_t) signals LASI on SDP0 */
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
4821
/************************************************************************
 * ixgbe_disable_intr_internal
 *
 *   Masks the non-queue causes, turns off MSI-X auto-clear, then
 *   disables each queue vector.  "nestok" is forwarded to the
 *   per-queue disable to control whether nested disables are allowed.
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* MSI-X: also stop auto-clearing causes */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
4842
4843 /************************************************************************
4844 * ixgbe_disable_intr
4845 ************************************************************************/
4846 static void
4847 ixgbe_disable_intr(struct adapter *adapter)
4848 {
4849
4850 ixgbe_disable_intr_internal(adapter, true);
4851 } /* ixgbe_disable_intr */
4852
4853 /************************************************************************
4854 * ixgbe_ensure_disabled_intr
4855 ************************************************************************/
4856 void
4857 ixgbe_ensure_disabled_intr(struct adapter *adapter)
4858 {
4859
4860 ixgbe_disable_intr_internal(adapter, false);
4861 } /* ixgbe_ensure_disabled_intr */
4862
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single-vector (INTx/MSI) handler: services TX (and defers RX) for
 *   queue 0, then demultiplexes fan-failure, link, SFP module/MSF and
 *   external-PHY causes to their softints.  Returns 0 when the
 *   interrupt was not ours, 1 otherwise.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* A zero cause register means this interrupt was not ours */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		/* Re-arm the fan-failure (SDP1) cause */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause, defer to softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber signals on SDP1 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Defer remaining RX/TX work, or re-enable interrupts if done */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
4956
/************************************************************************
 * ixgbe_free_pciintr_resources
 *
 *   Disestablishes all established interrupt handlers (queue vectors
 *   first, then the link/legacy vector) and finally releases the
 *   allocated PCI interrupt resources.
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Release the interrupt resources themselves */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
4995
4996 /************************************************************************
4997 * ixgbe_free_pci_resources
4998 ************************************************************************/
4999 static void
5000 ixgbe_free_pci_resources(struct adapter *adapter)
5001 {
5002
5003 ixgbe_free_pciintr_resources(adapter);
5004
5005 if (adapter->osdep.mem_size != 0) {
5006 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5007 adapter->osdep.mem_bus_space_handle,
5008 adapter->osdep.mem_size);
5009 }
5010
5011 } /* ixgbe_free_pci_resources */
5012
5013 /************************************************************************
5014 * ixgbe_set_sysctl_value
5015 ************************************************************************/
5016 static void
5017 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5018 const char *description, int *limit, int value)
5019 {
5020 device_t dev = adapter->dev;
5021 struct sysctllog **log;
5022 const struct sysctlnode *rnode, *cnode;
5023
5024 log = &adapter->sysctllog;
5025 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5026 aprint_error_dev(dev, "could not create sysctl root\n");
5027 return;
5028 }
5029 if (sysctl_createv(log, 0, &rnode, &cnode,
5030 CTLFLAG_READWRITE, CTLTYPE_INT,
5031 name, SYSCTL_DESCR(description),
5032 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5033 aprint_error_dev(dev, "could not create sysctl\n");
5034 *limit = value;
5035 } /* ixgbe_set_sysctl_value */
5036
5037 /************************************************************************
5038 * ixgbe_sysctl_flowcntl
5039 *
5040 * SYSCTL wrapper around setting Flow Control
5041 ************************************************************************/
5042 static int
5043 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5044 {
5045 struct sysctlnode node = *rnode;
5046 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5047 int error, fc;
5048
5049 fc = adapter->hw.fc.current_mode;
5050 node.sysctl_data = &fc;
5051 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5052 if (error != 0 || newp == NULL)
5053 return error;
5054
5055 /* Don't bother if it's not changed */
5056 if (fc == adapter->hw.fc.current_mode)
5057 return (0);
5058
5059 return ixgbe_set_flowcntl(adapter, fc);
5060 } /* ixgbe_sysctl_flowcntl */
5061
5062 /************************************************************************
5063 * ixgbe_set_flowcntl - Set flow control
5064 *
5065 * Flow control values:
5066 * 0 - off
5067 * 1 - rx pause
5068 * 2 - tx pause
5069 * 3 - full
5070 ************************************************************************/
5071 static int
5072 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5073 {
5074 switch (fc) {
5075 case ixgbe_fc_rx_pause:
5076 case ixgbe_fc_tx_pause:
5077 case ixgbe_fc_full:
5078 adapter->hw.fc.requested_mode = fc;
5079 if (adapter->num_queues > 1)
5080 ixgbe_disable_rx_drop(adapter);
5081 break;
5082 case ixgbe_fc_none:
5083 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5084 if (adapter->num_queues > 1)
5085 ixgbe_enable_rx_drop(adapter);
5086 break;
5087 default:
5088 return (EINVAL);
5089 }
5090
5091 #if 0 /* XXX NetBSD */
5092 /* Don't autoneg if forcing a value */
5093 adapter->hw.fc.disable_fc_autoneg = TRUE;
5094 #endif
5095 ixgbe_fc_enable(&adapter->hw);
5096
5097 return (0);
5098 } /* ixgbe_set_flowcntl */
5099
5100 /************************************************************************
5101 * ixgbe_enable_rx_drop
5102 *
5103 * Enable the hardware to drop packets when the buffer is
5104 * full. This is useful with multiqueue, so that no single
5105 * queue being full stalls the entire RX engine. We only
5106 * enable this when Multiqueue is enabled AND Flow Control
5107 * is disabled.
5108 ************************************************************************/
5109 static void
5110 ixgbe_enable_rx_drop(struct adapter *adapter)
5111 {
5112 struct ixgbe_hw *hw = &adapter->hw;
5113 struct rx_ring *rxr;
5114 u32 srrctl;
5115
5116 for (int i = 0; i < adapter->num_queues; i++) {
5117 rxr = &adapter->rx_rings[i];
5118 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5119 srrctl |= IXGBE_SRRCTL_DROP_EN;
5120 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5121 }
5122
5123 /* enable drop for each vf */
5124 for (int i = 0; i < adapter->num_vfs; i++) {
5125 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5126 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5127 IXGBE_QDE_ENABLE));
5128 }
5129 } /* ixgbe_enable_rx_drop */
5130
5131 /************************************************************************
5132 * ixgbe_disable_rx_drop
5133 ************************************************************************/
5134 static void
5135 ixgbe_disable_rx_drop(struct adapter *adapter)
5136 {
5137 struct ixgbe_hw *hw = &adapter->hw;
5138 struct rx_ring *rxr;
5139 u32 srrctl;
5140
5141 for (int i = 0; i < adapter->num_queues; i++) {
5142 rxr = &adapter->rx_rings[i];
5143 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5144 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5145 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5146 }
5147
5148 /* disable drop for each vf */
5149 for (int i = 0; i < adapter->num_vfs; i++) {
5150 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5151 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5152 }
5153 } /* ixgbe_disable_rx_drop */
5154
5155 /************************************************************************
5156 * ixgbe_sysctl_advertise
5157 *
5158 * SYSCTL wrapper around setting advertised speed
5159 ************************************************************************/
5160 static int
5161 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5162 {
5163 struct sysctlnode node = *rnode;
5164 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5165 int error = 0, advertise;
5166
5167 advertise = adapter->advertise;
5168 node.sysctl_data = &advertise;
5169 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5170 if (error != 0 || newp == NULL)
5171 return error;
5172
5173 return ixgbe_set_advertise(adapter, advertise);
5174 } /* ixgbe_sysctl_advertise */
5175
5176 /************************************************************************
5177 * ixgbe_set_advertise - Control advertised link speed
5178 *
5179 * Flags:
5180 * 0x00 - Default (all capable link speed)
5181 * 0x01 - advertise 100 Mb
5182 * 0x02 - advertise 1G
5183 * 0x04 - advertise 10G
5184 * 0x08 - advertise 10 Mb
5185 * 0x10 - advertise 2.5G
5186 * 0x20 - advertise 5G
5187 ************************************************************************/
5188 static int
5189 ixgbe_set_advertise(struct adapter *adapter, int advertise)
5190 {
5191 device_t dev;
5192 struct ixgbe_hw *hw;
5193 ixgbe_link_speed speed = 0;
5194 ixgbe_link_speed link_caps = 0;
5195 s32 err = IXGBE_NOT_IMPLEMENTED;
5196 bool negotiate = FALSE;
5197
5198 /* Checks to validate new value */
5199 if (adapter->advertise == advertise) /* no change */
5200 return (0);
5201
5202 dev = adapter->dev;
5203 hw = &adapter->hw;
5204
5205 /* No speed changes for backplane media */
5206 if (hw->phy.media_type == ixgbe_media_type_backplane)
5207 return (ENODEV);
5208
5209 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5210 (hw->phy.multispeed_fiber))) {
5211 device_printf(dev,
5212 "Advertised speed can only be set on copper or "
5213 "multispeed fiber media types.\n");
5214 return (EINVAL);
5215 }
5216
5217 if (advertise < 0x0 || advertise > 0x2f) {
5218 device_printf(dev,
5219 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5220 return (EINVAL);
5221 }
5222
5223 if (hw->mac.ops.get_link_capabilities) {
5224 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5225 &negotiate);
5226 if (err != IXGBE_SUCCESS) {
5227 device_printf(dev, "Unable to determine supported advertise speeds\n");
5228 return (ENODEV);
5229 }
5230 }
5231
5232 /* Set new value and report new advertised mode */
5233 if (advertise & 0x1) {
5234 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5235 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5236 return (EINVAL);
5237 }
5238 speed |= IXGBE_LINK_SPEED_100_FULL;
5239 }
5240 if (advertise & 0x2) {
5241 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5242 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5243 return (EINVAL);
5244 }
5245 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5246 }
5247 if (advertise & 0x4) {
5248 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5249 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5250 return (EINVAL);
5251 }
5252 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5253 }
5254 if (advertise & 0x8) {
5255 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5256 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5257 return (EINVAL);
5258 }
5259 speed |= IXGBE_LINK_SPEED_10_FULL;
5260 }
5261 if (advertise & 0x10) {
5262 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5263 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5264 return (EINVAL);
5265 }
5266 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5267 }
5268 if (advertise & 0x20) {
5269 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5270 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5271 return (EINVAL);
5272 }
5273 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5274 }
5275 if (advertise == 0)
5276 speed = link_caps; /* All capable link speed */
5277
5278 hw->mac.autotry_restart = TRUE;
5279 hw->mac.ops.setup_link(hw, speed, TRUE);
5280 adapter->advertise = advertise;
5281
5282 return (0);
5283 } /* ixgbe_set_advertise */
5284
5285 /************************************************************************
5286 * ixgbe_get_advertise - Get current advertised speed settings
5287 *
5288 * Formatted for sysctl usage.
5289 * Flags:
5290 * 0x01 - advertise 100 Mb
5291 * 0x02 - advertise 1G
5292 * 0x04 - advertise 10G
5293 * 0x08 - advertise 10 Mb (yes, Mb)
5294 * 0x10 - advertise 2.5G
5295 * 0x20 - advertise 5G
5296 ************************************************************************/
5297 static int
5298 ixgbe_get_advertise(struct adapter *adapter)
5299 {
5300 struct ixgbe_hw *hw = &adapter->hw;
5301 int speed;
5302 ixgbe_link_speed link_caps = 0;
5303 s32 err;
5304 bool negotiate = FALSE;
5305
5306 /*
5307 * Advertised speed means nothing unless it's copper or
5308 * multi-speed fiber
5309 */
5310 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5311 !(hw->phy.multispeed_fiber))
5312 return (0);
5313
5314 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5315 if (err != IXGBE_SUCCESS)
5316 return (0);
5317
5318 speed =
5319 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5320 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5321 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5322 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5323 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5324 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5325
5326 return speed;
5327 } /* ixgbe_get_advertise */
5328
5329 /************************************************************************
5330 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5331 *
5332 * Control values:
5333 * 0/1 - off / on (use default value of 1000)
5334 *
5335 * Legal timer values are:
5336 * 50,100,250,500,1000,2000,5000,10000
5337 *
5338 * Turning off interrupt moderation will also turn this off.
5339 ************************************************************************/
5340 static int
5341 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5342 {
5343 struct sysctlnode node = *rnode;
5344 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5345 struct ifnet *ifp = adapter->ifp;
5346 int error;
5347 int newval;
5348
5349 newval = adapter->dmac;
5350 node.sysctl_data = &newval;
5351 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5352 if ((error) || (newp == NULL))
5353 return (error);
5354
5355 switch (newval) {
5356 case 0:
5357 /* Disabled */
5358 adapter->dmac = 0;
5359 break;
5360 case 1:
5361 /* Enable and use default */
5362 adapter->dmac = 1000;
5363 break;
5364 case 50:
5365 case 100:
5366 case 250:
5367 case 500:
5368 case 1000:
5369 case 2000:
5370 case 5000:
5371 case 10000:
5372 /* Legal values - allow */
5373 adapter->dmac = newval;
5374 break;
5375 default:
5376 /* Do nothing, illegal value */
5377 return (EINVAL);
5378 }
5379
5380 /* Re-initialize hardware if it's already running */
5381 if (ifp->if_flags & IFF_RUNNING)
5382 ifp->if_init(ifp);
5383
5384 return (0);
5385 }
5386
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 *
 *   NOTE(review): the real body is under "#ifdef notyet" and still uses
 *   FreeBSD interfaces (req->newp, DEVICE_SUSPEND/DEVICE_RESUME,
 *   pci_get_powerstate); on NetBSD this sysctl is currently a no-op
 *   that always returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
#endif
5430
/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool new_wol_enabled;
	int error = 0;

	/*
	 * NOTE(review): sysctl_lookup() copies through a "bool" buffer
	 * here; that is only safe if the node was created with a
	 * bool-sized type -- verify against the node creation site.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* No change requested */
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware without WoL support */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
5465
/************************************************************************
 * ixgbe_sysctl_wufc - Wake Up Filter Control
 *
 *   Sysctl to enable/disable the types of packets that the
 *   adapter will wake up on upon receipt.
 *   Flags:
 *     0x1  - Link Status Change
 *     0x2  - Magic Packet
 *     0x4  - Direct Exact
 *     0x8  - Directed Multicast
 *     0x10 - Broadcast
 *     0x20 - ARP/IPv4 Request Packet
 *     0x40 - Direct IPv4 Packet
 *     0x80 - Direct IPv6 Packet
 *
 *   Settings not listed above will cause the sysctl to return an error.
 ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low 8 flag bits may be set from userland */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): the OR below merges the OLD low 24 bits back in,
	 * so bits already set in adapter->wufc can never be cleared via
	 * this sysctl.  This matches the upstream code; confirm whether
	 * the preserve-old-bits behavior is intentional.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
5508
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 *   Debug sysctl that dumps the RSS redirection table (RETA/ERETA).
 *
 *   NOTE(review): the body is under "#ifdef notyet" and uses the
 *   FreeBSD sbuf API; on NetBSD this sysctl currently does nothing
 *   and always returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5567
5568 /************************************************************************
5569 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5570 *
5571 * For X552/X557-AT devices using an external PHY
5572 ************************************************************************/
5573 static int
5574 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5575 {
5576 struct sysctlnode node = *rnode;
5577 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5578 struct ixgbe_hw *hw = &adapter->hw;
5579 int val;
5580 u16 reg;
5581 int error;
5582
5583 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5584 device_printf(adapter->dev,
5585 "Device has no supported external thermal sensor.\n");
5586 return (ENODEV);
5587 }
5588
5589 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5590 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5591 device_printf(adapter->dev,
5592 "Error reading from PHY's current temperature register\n");
5593 return (EAGAIN);
5594 }
5595
5596 node.sysctl_data = &val;
5597
5598 /* Shift temp for output */
5599 val = reg >> 8;
5600
5601 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5602 if ((error) || (newp == NULL))
5603 return (error);
5604
5605 return (0);
5606 } /* ixgbe_sysctl_phy_temp */
5607
5608 /************************************************************************
5609 * ixgbe_sysctl_phy_overtemp_occurred
5610 *
5611 * Reports (directly from the PHY) whether the current PHY
5612 * temperature is over the overtemp threshold.
5613 ************************************************************************/
5614 static int
5615 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5616 {
5617 struct sysctlnode node = *rnode;
5618 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5619 struct ixgbe_hw *hw = &adapter->hw;
5620 int val, error;
5621 u16 reg;
5622
5623 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5624 device_printf(adapter->dev,
5625 "Device has no supported external thermal sensor.\n");
5626 return (ENODEV);
5627 }
5628
5629 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5630 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5631 device_printf(adapter->dev,
5632 "Error reading from PHY's temperature status register\n");
5633 return (EAGAIN);
5634 }
5635
5636 node.sysctl_data = &val;
5637
5638 /* Get occurrence bit */
5639 val = !!(reg & 0x4000);
5640
5641 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5642 if ((error) || (newp == NULL))
5643 return (error);
5644
5645 return (0);
5646 } /* ixgbe_sysctl_phy_overtemp_occurred */
5647
5648 /************************************************************************
5649 * ixgbe_sysctl_eee_state
5650 *
5651 * Sysctl to set EEE power saving feature
5652 * Values:
5653 * 0 - disable EEE
5654 * 1 - enable EEE
5655 * (none) - get current device EEE state
5656 ************************************************************************/
5657 static int
5658 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5659 {
5660 struct sysctlnode node = *rnode;
5661 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5662 struct ifnet *ifp = adapter->ifp;
5663 device_t dev = adapter->dev;
5664 int curr_eee, new_eee, error = 0;
5665 s32 retval;
5666
5667 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5668 node.sysctl_data = &new_eee;
5669 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5670 if ((error) || (newp == NULL))
5671 return (error);
5672
5673 /* Nothing to do */
5674 if (new_eee == curr_eee)
5675 return (0);
5676
5677 /* Not supported */
5678 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5679 return (EINVAL);
5680
5681 /* Bounds checking */
5682 if ((new_eee < 0) || (new_eee > 1))
5683 return (EINVAL);
5684
5685 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5686 if (retval) {
5687 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5688 return (EINVAL);
5689 }
5690
5691 /* Restart auto-neg */
5692 ifp->if_init(ifp);
5693
5694 device_printf(dev, "New EEE state: %d\n", new_eee);
5695
5696 /* Cache new value */
5697 if (new_eee)
5698 adapter->feat_en |= IXGBE_FEATURE_EEE;
5699 else
5700 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5701
5702 return (error);
5703 } /* ixgbe_sysctl_eee_state */
5704
/*
 * Print one per-queue register (RDH, RDT, RXDCTL, ...) for every queue
 * of the adapter on a single line, prefixed with the device name.
 * Used only by ixgbe_print_debug_info().  NOTE: evaluates `adapter`
 * more than once — pass a plain lvalue, not an expression with side
 * effects.
 */
#define PRINTQS(adapter, regname) \
	do { \
		struct ixgbe_hw	*_hw = &(adapter)->hw; \
		int _i; \
		\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) { \
			printf((_i == 0) ? "\t" : " "); \
			printf("%08x", IXGBE_READ_REG(_hw, \
				IXGBE_##regname(_i))); \
		} \
		printf("\n"); \
	} while (0)
5718
5719 /************************************************************************
5720 * ixgbe_print_debug_info
5721 *
5722 * Called only when em_display_debug_stats is enabled.
5723 * Provides a way to take a look at important statistics
5724 * maintained by the driver and hardware.
5725 ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/*
	 * X550-family MACs have a 128-entry RSS redirection table
	 * (RETA + ERETA); older MACs only have the 32-entry RETA.
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Dump the RSS redirection table, one entry per line. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
			    IXGBE_ERETA(i - 32)));
	}

	/* Column header: one column per queue. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	/* Per-queue Rx descriptor ring registers, one register per row. */
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* RQSMR registers each cover 4 queues, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter: how often each queue's interrupt was disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; 82598 lacks the extended EIMS registers. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
		    IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
5791
5792 /************************************************************************
5793 * ixgbe_sysctl_debug
5794 ************************************************************************/
5795 static int
5796 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5797 {
5798 struct sysctlnode node = *rnode;
5799 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5800 int error, result = 0;
5801
5802 node.sysctl_data = &result;
5803 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5804
5805 if (error || newp == NULL)
5806 return error;
5807
5808 if (result == 1)
5809 ixgbe_print_debug_info(adapter);
5810
5811 return 0;
5812 } /* ixgbe_sysctl_debug */
5813
5814 /************************************************************************
5815 * ixgbe_init_device_features
5816 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Features every supported MAC can do, before per-MAC adjustment. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	    | IXGBE_FEATURE_RSS
	    | IXGBE_FEATURE_MSI
	    | IXGBE_FEATURE_MSIX
	    | IXGBE_FEATURE_LEGACY_IRQ
	    | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Only the 82598AT variant has the fan-failure pin. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass hardware is only wired to PCI function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* EEE only on the KR backplane variant. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot do INTx. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass hardware is only wired to PCI function 0. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* QSFP variant cannot do INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SR-IOV both require per-queue vectors. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
5919
5920 /************************************************************************
5921 * ixgbe_probe - Device identification routine
5922 *
5923 * Determines if the driver should be loaded on
5924 * adapter based on its PCI vendor/device ID.
5925 *
5926 * return BUS_PROBE_DEFAULT on success, positive on failure
5927 ************************************************************************/
5928 static int
5929 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5930 {
5931 const struct pci_attach_args *pa = aux;
5932
5933 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5934 }
5935
5936 static const ixgbe_vendor_info_t *
5937 ixgbe_lookup(const struct pci_attach_args *pa)
5938 {
5939 const ixgbe_vendor_info_t *ent;
5940 pcireg_t subid;
5941
5942 INIT_DEBUGOUT("ixgbe_lookup: begin");
5943
5944 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5945 return NULL;
5946
5947 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5948
5949 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5950 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5951 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5952 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5953 (ent->subvendor_id == 0)) &&
5954 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5955 (ent->subdevice_id == 0))) {
5956 return ent;
5957 }
5958 }
5959 return NULL;
5960 }
5961
5962 static int
5963 ixgbe_ifflags_cb(struct ethercom *ec)
5964 {
5965 struct ifnet *ifp = &ec->ec_if;
5966 struct adapter *adapter = ifp->if_softc;
5967 int change, rc = 0;
5968
5969 IXGBE_CORE_LOCK(adapter);
5970
5971 change = ifp->if_flags ^ adapter->if_flags;
5972 if (change != 0)
5973 adapter->if_flags = ifp->if_flags;
5974
5975 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5976 rc = ENETRESET;
5977 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5978 ixgbe_set_promisc(adapter);
5979
5980 /* Set up VLAN support and filter */
5981 ixgbe_setup_vlan_hw_support(adapter);
5982
5983 IXGBE_CORE_UNLOCK(adapter);
5984
5985 return rc;
5986 }
5987
5988 /************************************************************************
5989 * ixgbe_ioctl - Ioctl entry point
5990 *
5991 * Called when the user wants to configure the interface.
5992 *
5993 * return 0 on success, positive on failure
5994 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The four L4 Rx checksum bits that must toggle together. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/*
	 * First switch: debug tracing only, except for SIOCZIFDATA which
	 * also clears the hardware and software counters as a side effect.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Zero both the hardware and the driver's event counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual command handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media requests go straight to the ifmedia layer. */
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP+ EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl do the work; ENETRESET means "reinit". */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* Interface down: nothing more to do. */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU change: reinit with new settings. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
6129
6130 /************************************************************************
6131 * ixgbe_check_fan_failure
6132 ************************************************************************/
6133 static void
6134 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6135 {
6136 u32 mask;
6137
6138 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6139 IXGBE_ESDP_SDP1;
6140
6141 if (reg & mask)
6142 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6143 } /* ixgbe_check_fan_failure */
6144
6145 /************************************************************************
6146 * ixgbe_handle_que
6147 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Drain Rx first, then Tx under the Tx ring lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue Tx path: flush anything queued on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule ourselves instead of re-enabling. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt (MSI-X: just this queue's vector). */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* Legacy/MSI: re-enable all interrupts. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
6185
6186 /************************************************************************
6187 * ixgbe_handle_que_work
6188 ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	/* Recover the queue from the workqueue cookie embedded in it. */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
6200
6201 /************************************************************************
6202 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6203 ************************************************************************/
6204 static int
6205 ixgbe_allocate_legacy(struct adapter *adapter,
6206 const struct pci_attach_args *pa)
6207 {
6208 device_t dev = adapter->dev;
6209 struct ix_queue *que = adapter->queues;
6210 struct tx_ring *txr = adapter->tx_rings;
6211 int counts[PCI_INTR_TYPE_SIZE];
6212 pci_intr_type_t intr_type, max_type;
6213 char intrbuf[PCI_INTRSTR_LEN];
6214 const char *intrstr = NULL;
6215
6216 /* We allocate a single interrupt resource */
6217 max_type = PCI_INTR_TYPE_MSI;
6218 counts[PCI_INTR_TYPE_MSIX] = 0;
6219 counts[PCI_INTR_TYPE_MSI] =
6220 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6221 /* Check not feat_en but feat_cap to fallback to INTx */
6222 counts[PCI_INTR_TYPE_INTX] =
6223 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6224
6225 alloc_retry:
6226 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
6227 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6228 return ENXIO;
6229 }
6230 adapter->osdep.nintrs = 1;
6231 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
6232 intrbuf, sizeof(intrbuf));
6233 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
6234 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6235 device_xname(dev));
6236 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
6237 if (adapter->osdep.ihs[0] == NULL) {
6238 aprint_error_dev(dev,"unable to establish %s\n",
6239 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6240 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6241 adapter->osdep.intrs = NULL;
6242 switch (intr_type) {
6243 case PCI_INTR_TYPE_MSI:
6244 /* The next try is for INTx: Disable MSI */
6245 max_type = PCI_INTR_TYPE_INTX;
6246 counts[PCI_INTR_TYPE_INTX] = 1;
6247 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6248 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6249 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6250 goto alloc_retry;
6251 } else
6252 break;
6253 case PCI_INTR_TYPE_INTX:
6254 default:
6255 /* See below */
6256 break;
6257 }
6258 }
6259 if (intr_type == PCI_INTR_TYPE_INTX) {
6260 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
6261 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6262 }
6263 if (adapter->osdep.ihs[0] == NULL) {
6264 aprint_error_dev(dev,
6265 "couldn't establish interrupt%s%s\n",
6266 intrstr ? " at " : "", intrstr ? intrstr : "");
6267 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
6268 adapter->osdep.intrs = NULL;
6269 return ENXIO;
6270 }
6271 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6272 /*
6273 * Try allocating a fast interrupt and the associated deferred
6274 * processing contexts.
6275 */
6276 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
6277 txr->txr_si =
6278 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6279 ixgbe_deferred_mq_start, txr);
6280 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6281 ixgbe_handle_que, que);
6282
6283 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
6284 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
6285 aprint_error_dev(dev,
6286 "could not establish software interrupts\n");
6287
6288 return ENXIO;
6289 }
6290 /* For simplicity in the handlers */
6291 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
6292
6293 return (0);
6294 } /* ixgbe_allocate_legacy */
6295
6296 /************************************************************************
6297 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6298 ************************************************************************/
6299 static int
6300 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
6301 {
6302 device_t dev = adapter->dev;
6303 struct ix_queue *que = adapter->queues;
6304 struct tx_ring *txr = adapter->tx_rings;
6305 pci_chipset_tag_t pc;
6306 char intrbuf[PCI_INTRSTR_LEN];
6307 char intr_xname[32];
6308 char wqname[MAXCOMLEN];
6309 const char *intrstr = NULL;
6310 int error, vector = 0;
6311 int cpu_id = 0;
6312 kcpuset_t *affinity;
6313 #ifdef RSS
6314 unsigned int rss_buckets = 0;
6315 kcpuset_t cpu_mask;
6316 #endif
6317
6318 pc = adapter->osdep.pc;
6319 #ifdef RSS
6320 /*
6321 * If we're doing RSS, the number of queues needs to
6322 * match the number of RSS buckets that are configured.
6323 *
6324 * + If there's more queues than RSS buckets, we'll end
6325 * up with queues that get no traffic.
6326 *
6327 * + If there's more RSS buckets than queues, we'll end
6328 * up having multiple RSS buckets map to the same queue,
6329 * so there'll be some contention.
6330 */
6331 rss_buckets = rss_getnumbuckets();
6332 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6333 (adapter->num_queues != rss_buckets)) {
6334 device_printf(dev,
6335 "%s: number of queues (%d) != number of RSS buckets (%d)"
6336 "; performance will be impacted.\n",
6337 __func__, adapter->num_queues, rss_buckets);
6338 }
6339 #endif
6340
6341 adapter->osdep.nintrs = adapter->num_queues + 1;
6342 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6343 adapter->osdep.nintrs) != 0) {
6344 aprint_error_dev(dev,
6345 "failed to allocate MSI-X interrupt\n");
6346 return (ENXIO);
6347 }
6348
6349 kcpuset_create(&affinity, false);
6350 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6351 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6352 device_xname(dev), i);
6353 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6354 sizeof(intrbuf));
6355 #ifdef IXGBE_MPSAFE
6356 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6357 true);
6358 #endif
6359 /* Set the handler function */
6360 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6361 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6362 intr_xname);
6363 if (que->res == NULL) {
6364 aprint_error_dev(dev,
6365 "Failed to register QUE handler\n");
6366 error = ENXIO;
6367 goto err_out;
6368 }
6369 que->msix = vector;
6370 adapter->active_queues |= (u64)(1 << que->msix);
6371
6372 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6373 #ifdef RSS
6374 /*
6375 * The queue ID is used as the RSS layer bucket ID.
6376 * We look up the queue ID -> RSS CPU ID and select
6377 * that.
6378 */
6379 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6380 CPU_SETOF(cpu_id, &cpu_mask);
6381 #endif
6382 } else {
6383 /*
6384 * Bind the MSI-X vector, and thus the
6385 * rings to the corresponding CPU.
6386 *
6387 * This just happens to match the default RSS
6388 * round-robin bucket -> queue -> CPU allocation.
6389 */
6390 if (adapter->num_queues > 1)
6391 cpu_id = i;
6392 }
6393 /* Round-robin affinity */
6394 kcpuset_zero(affinity);
6395 kcpuset_set(affinity, cpu_id % ncpu);
6396 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6397 NULL);
6398 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6399 intrstr);
6400 if (error == 0) {
6401 #if 1 /* def IXGBE_DEBUG */
6402 #ifdef RSS
6403 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6404 cpu_id % ncpu);
6405 #else
6406 aprint_normal(", bound queue %d to cpu %d", i,
6407 cpu_id % ncpu);
6408 #endif
6409 #endif /* IXGBE_DEBUG */
6410 }
6411 aprint_normal("\n");
6412
6413 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6414 txr->txr_si = softint_establish(
6415 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6416 ixgbe_deferred_mq_start, txr);
6417 if (txr->txr_si == NULL) {
6418 aprint_error_dev(dev,
6419 "couldn't establish software interrupt\n");
6420 error = ENXIO;
6421 goto err_out;
6422 }
6423 }
6424 que->que_si
6425 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6426 ixgbe_handle_que, que);
6427 if (que->que_si == NULL) {
6428 aprint_error_dev(dev,
6429 "couldn't establish software interrupt\n");
6430 error = ENXIO;
6431 goto err_out;
6432 }
6433 }
6434 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6435 error = workqueue_create(&adapter->txr_wq, wqname,
6436 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6437 IXGBE_WORKQUEUE_FLAGS);
6438 if (error) {
6439 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6440 goto err_out;
6441 }
6442 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6443
6444 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6445 error = workqueue_create(&adapter->que_wq, wqname,
6446 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6447 IXGBE_WORKQUEUE_FLAGS);
6448 if (error) {
6449 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6450 goto err_out;
6451 }
6452
6453 /* and Link */
6454 cpu_id++;
6455 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6456 adapter->vector = vector;
6457 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6458 sizeof(intrbuf));
6459 #ifdef IXGBE_MPSAFE
6460 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6461 true);
6462 #endif
6463 /* Set the link handler function */
6464 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6465 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6466 intr_xname);
6467 if (adapter->osdep.ihs[vector] == NULL) {
6468 aprint_error_dev(dev, "Failed to register LINK handler\n");
6469 error = ENXIO;
6470 goto err_out;
6471 }
6472 /* Round-robin affinity */
6473 kcpuset_zero(affinity);
6474 kcpuset_set(affinity, cpu_id % ncpu);
6475 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6476 NULL);
6477
6478 aprint_normal_dev(dev,
6479 "for link, interrupting at %s", intrstr);
6480 if (error == 0)
6481 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6482 else
6483 aprint_normal("\n");
6484
6485 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6486 adapter->mbx_si =
6487 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6488 ixgbe_handle_mbx, adapter);
6489 if (adapter->mbx_si == NULL) {
6490 aprint_error_dev(dev,
6491 "could not establish software interrupts\n");
6492
6493 error = ENXIO;
6494 goto err_out;
6495 }
6496 }
6497
6498 kcpuset_destroy(affinity);
6499 aprint_normal_dev(dev,
6500 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6501
6502 return (0);
6503
6504 err_out:
6505 kcpuset_destroy(affinity);
6506 ixgbe_free_softint(adapter);
6507 ixgbe_free_pciintr_resources(adapter);
6508 return (error);
6509 } /* ixgbe_allocate_msix */
6510
6511 /************************************************************************
6512 * ixgbe_configure_interrupts
6513 *
6514 * Setup MSI-X, MSI, or legacy interrupts (in that order).
6515 * This will also depend on user settings.
6516 ************************************************************************/
6517 static int
6518 ixgbe_configure_interrupts(struct adapter *adapter)
6519 {
6520 device_t dev = adapter->dev;
6521 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6522 int want, queues, msgs;
6523
6524 /* Default to 1 queue if MSI-X setup fails */
6525 adapter->num_queues = 1;
6526
6527 /* Override by tuneable */
6528 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
6529 goto msi;
6530
6531 /*
6532 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
6533 * interrupt slot.
6534 */
6535 if (ncpu == 1)
6536 goto msi;
6537
6538 /* First try MSI-X */
6539 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
6540 msgs = MIN(msgs, IXG_MAX_NINTR);
6541 if (msgs < 2)
6542 goto msi;
6543
6544 adapter->msix_mem = (void *)1; /* XXX */
6545
6546 /* Figure out a reasonable auto config value */
6547 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
6548
6549 #ifdef RSS
6550 /* If we're doing RSS, clamp at the number of RSS buckets */
6551 if (adapter->feat_en & IXGBE_FEATURE_RSS)
6552 queues = uimin(queues, rss_getnumbuckets());
6553 #endif
6554 if (ixgbe_num_queues > queues) {
6555 aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
6556 ixgbe_num_queues = queues;
6557 }
6558
6559 if (ixgbe_num_queues != 0)
6560 queues = ixgbe_num_queues;
6561 else
6562 queues = uimin(queues,
6563 uimin(mac->max_tx_queues, mac->max_rx_queues));
6564
6565 /* reflect correct sysctl value */
6566 ixgbe_num_queues = queues;
6567
6568 /*
6569 * Want one vector (RX/TX pair) per queue
6570 * plus an additional for Link.
6571 */
6572 want = queues + 1;
6573 if (msgs >= want)
6574 msgs = want;
6575 else {
6576 aprint_error_dev(dev, "MSI-X Configuration Problem, "
6577 "%d vectors but %d queues wanted!\n",
6578 msgs, want);
6579 goto msi;
6580 }
6581 adapter->num_queues = queues;
6582 adapter->feat_en |= IXGBE_FEATURE_MSIX;
6583 return (0);
6584
6585 /*
6586 * MSI-X allocation failed or provided us with
6587 * less vectors than needed. Free MSI-X resources
6588 * and we'll try enabling MSI.
6589 */
6590 msi:
6591 /* Without MSI-X, some features are no longer supported */
6592 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6593 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6594 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6595 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6596
6597 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
6598 adapter->msix_mem = NULL; /* XXX */
6599 if (msgs > 1)
6600 msgs = 1;
6601 if (msgs != 0) {
6602 msgs = 1;
6603 adapter->feat_en |= IXGBE_FEATURE_MSI;
6604 return (0);
6605 }
6606
6607 if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
6608 aprint_error_dev(dev,
6609 "Device does not support legacy interrupts.\n");
6610 return 1;
6611 }
6612
6613 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6614
6615 return (0);
6616 } /* ixgbe_configure_interrupts */
6617
6618
6619 /************************************************************************
6620 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6621 *
6622 * Done outside of interrupt context since the driver might sleep
6623 ************************************************************************/
6624 static void
6625 ixgbe_handle_link(void *context)
6626 {
6627 struct adapter *adapter = context;
6628 struct ixgbe_hw *hw = &adapter->hw;
6629
6630 IXGBE_CORE_LOCK(adapter);
6631 ++adapter->link_sicount.ev_count;
6632 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6633 ixgbe_update_link_status(adapter);
6634
6635 /* Re-enable link interrupts */
6636 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6637
6638 IXGBE_CORE_UNLOCK(adapter);
6639 } /* ixgbe_handle_link */
6640
6641 #if 0
6642 /************************************************************************
6643 * ixgbe_rearm_queues
6644 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	/*
	 * Trigger a software interrupt (EICS) for the given queue bitmap.
	 * 82598 has a single EICS register; later MACs split the 64-bit
	 * queue space across EICS_EX(0) (low 32 bits) and EICS_EX(1)
	 * (high 32 bits).
	 */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
6669 #endif
6670