ixgbe.c revision 1.128 1 /* $NetBSD: ixgbe.c,v 1.128 2018/03/02 10:19:20 knakahara Exp $ */
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
36
37 /*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved.
40 *
41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 #ifdef _KERNEL_OPT
67 #include "opt_inet.h"
68 #include "opt_inet6.h"
69 #include "opt_net_mpsafe.h"
70 #endif
71
72 #include "ixgbe.h"
73 #include "vlan.h"
74
75 #include <sys/cprng.h>
76 #include <dev/mii/mii.h>
77 #include <dev/mii/miivar.h>
78
79 /************************************************************************
80 * Driver version
81 ************************************************************************/
82 char ixgbe_driver_version[] = "3.2.12-k";
83
84
85 /************************************************************************
86 * PCI Device ID Table
87 *
88 * Used by probe to select devices to load on
89 * Last field stores an index into ixgbe_strings
90 * Last entry must be all 0s
91 *
92 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
93 ************************************************************************/
94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
95 {
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
140 /* required last entry */
141 {0, 0, 0, 0, 0}
142 };
143
144 /************************************************************************
145 * Table of branding strings
146 ************************************************************************/
147 static const char *ixgbe_strings[] = {
148 "Intel(R) PRO/10GbE PCI-Express Network Driver"
149 };
150
151 /************************************************************************
152 * Function prototypes
153 ************************************************************************/
154 static int ixgbe_probe(device_t, cfdata_t, void *);
155 static void ixgbe_attach(device_t, device_t, void *);
156 static int ixgbe_detach(device_t, int);
157 #if 0
158 static int ixgbe_shutdown(device_t);
159 #endif
160 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
161 static bool ixgbe_resume(device_t, const pmf_qual_t *);
162 static int ixgbe_ifflags_cb(struct ethercom *);
163 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
164 static void ixgbe_ifstop(struct ifnet *, int);
165 static int ixgbe_init(struct ifnet *);
166 static void ixgbe_init_locked(struct adapter *);
167 static void ixgbe_stop(void *);
168 static void ixgbe_init_device_features(struct adapter *);
169 static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
170 static void ixgbe_add_media_types(struct adapter *);
171 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
172 static int ixgbe_media_change(struct ifnet *);
173 static int ixgbe_allocate_pci_resources(struct adapter *,
174 const struct pci_attach_args *);
175 static void ixgbe_free_softint(struct adapter *);
176 static void ixgbe_get_slot_info(struct adapter *);
177 static int ixgbe_allocate_msix(struct adapter *,
178 const struct pci_attach_args *);
179 static int ixgbe_allocate_legacy(struct adapter *,
180 const struct pci_attach_args *);
181 static int ixgbe_configure_interrupts(struct adapter *);
182 static void ixgbe_free_pciintr_resources(struct adapter *);
183 static void ixgbe_free_pci_resources(struct adapter *);
184 static void ixgbe_local_timer(void *);
185 static void ixgbe_local_timer1(void *);
186 static int ixgbe_setup_interface(device_t, struct adapter *);
187 static void ixgbe_config_gpie(struct adapter *);
188 static void ixgbe_config_dmac(struct adapter *);
189 static void ixgbe_config_delay_values(struct adapter *);
190 static void ixgbe_config_link(struct adapter *);
191 static void ixgbe_check_wol_support(struct adapter *);
192 static int ixgbe_setup_low_power_mode(struct adapter *);
193 static void ixgbe_rearm_queues(struct adapter *, u64);
194
195 static void ixgbe_initialize_transmit_units(struct adapter *);
196 static void ixgbe_initialize_receive_units(struct adapter *);
197 static void ixgbe_enable_rx_drop(struct adapter *);
198 static void ixgbe_disable_rx_drop(struct adapter *);
199 static void ixgbe_initialize_rss_mapping(struct adapter *);
200
201 static void ixgbe_enable_intr(struct adapter *);
202 static void ixgbe_disable_intr(struct adapter *);
203 static void ixgbe_update_stats_counters(struct adapter *);
204 static void ixgbe_set_promisc(struct adapter *);
205 static void ixgbe_set_multi(struct adapter *);
206 static void ixgbe_update_link_status(struct adapter *);
207 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
208 static void ixgbe_configure_ivars(struct adapter *);
209 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
210 static void ixgbe_eitr_write(struct ix_queue *, uint32_t);
211
212 static void ixgbe_setup_vlan_hw_support(struct adapter *);
213 #if 0
214 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
215 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
216 #endif
217
218 static void ixgbe_add_device_sysctls(struct adapter *);
219 static void ixgbe_add_hw_stats(struct adapter *);
220 static void ixgbe_clear_evcnt(struct adapter *);
221 static int ixgbe_set_flowcntl(struct adapter *, int);
222 static int ixgbe_set_advertise(struct adapter *, int);
223 static int ixgbe_get_advertise(struct adapter *);
224
225 /* Sysctl handlers */
226 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
227 const char *, int *, int);
228 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
229 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
230 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
231 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
232 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
233 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
234 #ifdef IXGBE_DEBUG
235 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
236 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
237 #endif
238 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
239 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
240 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
241 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
242 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
243 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
244 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
245
246 /* Support for pluggable optic modules */
247 static bool ixgbe_sfp_probe(struct adapter *);
248
249 /* Legacy (single vector) interrupt handler */
250 static int ixgbe_legacy_irq(void *);
251
252 /* The MSI/MSI-X Interrupt handlers */
253 static int ixgbe_msix_que(void *);
254 static int ixgbe_msix_link(void *);
255
256 /* Software interrupts for deferred work */
257 static void ixgbe_handle_que(void *);
258 static void ixgbe_handle_link(void *);
259 static void ixgbe_handle_msf(void *);
260 static void ixgbe_handle_mod(void *);
261 static void ixgbe_handle_phy(void *);
262
263 /* Workqueue handler for deferred work */
264 static void ixgbe_handle_que_work(struct work *, void *);
265
266 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
267
268 /************************************************************************
269 * NetBSD Device Interface Entry Points
270 ************************************************************************/
271 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
272 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
273 DVF_DETACH_SHUTDOWN);
274
275 #if 0
276 devclass_t ix_devclass;
277 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
278
279 MODULE_DEPEND(ix, pci, 1, 1, 1);
280 MODULE_DEPEND(ix, ether, 1, 1, 1);
281 #ifdef DEV_NETMAP
282 MODULE_DEPEND(ix, netmap, 1, 1, 1);
283 #endif
284 #endif
285
286 /*
287 * TUNEABLE PARAMETERS:
288 */
289
290 /*
291 * AIM: Adaptive Interrupt Moderation
292 * which means that the interrupt rate
293 * is varied over time based on the
294 * traffic for that interrupt vector
295 */
296 static bool ixgbe_enable_aim = true;
297 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
298 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
299 "Enable adaptive interrupt moderation");
300
301 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
302 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
303 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
304
305 /* How many packets rxeof tries to clean at a time */
306 static int ixgbe_rx_process_limit = 256;
307 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
308 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
309
310 /* How many packets txeof tries to clean at a time */
311 static int ixgbe_tx_process_limit = 256;
312 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
313 &ixgbe_tx_process_limit, 0,
314 "Maximum number of sent packets to process at a time, -1 means unlimited");
315
316 /* Flow control setting, default to full */
317 static int ixgbe_flow_control = ixgbe_fc_full;
318 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
319 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
320
/* Whether packet processing uses workqueue or softint */
322 static bool ixgbe_txrx_workqueue = false;
323
/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now, since it is set during
 * attach; set this to 'ixgbe_smart_speed_off'
 * to disable.
 */
331 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
332
333 /*
334 * MSI-X should be the default for best performance,
335 * but this allows it to be forced off for testing.
336 */
337 static int ixgbe_enable_msix = 1;
338 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
339 "Enable MSI-X interrupts");
340
/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
347 static int ixgbe_num_queues = 0;
348 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
349 "Number of queues to configure, 0 indicates autoconfigure");
350
351 /*
352 * Number of TX descriptors per ring,
353 * setting higher than RX as this seems
354 * the better performing choice.
355 */
356 static int ixgbe_txd = PERFORM_TXD;
357 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
358 "Number of transmit descriptors per queue");
359
360 /* Number of RX descriptors per ring */
361 static int ixgbe_rxd = PERFORM_RXD;
362 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
363 "Number of receive descriptors per queue");
364
365 /*
366 * Defining this on will allow the use
367 * of unsupported SFP+ modules, note that
368 * doing so you are on your own :)
369 */
370 static int allow_unsupported_sfp = false;
371 #define TUNABLE_INT(__x, __y)
372 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
373
374 /*
375 * Not sure if Flow Director is fully baked,
376 * so we'll default to turning it off.
377 */
378 static int ixgbe_enable_fdir = 0;
379 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
380 "Enable Flow Director");
381
382 /* Legacy Transmit (single queue) */
383 static int ixgbe_enable_legacy_tx = 0;
384 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
385 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
386
387 /* Receive-Side Scaling */
388 static int ixgbe_enable_rss = 1;
389 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
390 "Enable Receive-Side Scaling (RSS)");
391
392 /* Keep running tab on them for sanity check */
393 static int ixgbe_total_ports;
394
395 #if 0
396 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
397 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
398 #endif
399
400 #ifdef NET_MPSAFE
401 #define IXGBE_MPSAFE 1
402 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
403 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
404 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
405 #else
406 #define IXGBE_CALLOUT_FLAGS 0
407 #define IXGBE_SOFTINFT_FLAGS 0
408 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
409 #endif
410 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
411
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the Receive-Side Scaling (RSS) state in hardware:
 *     - the redirection table (RETA, plus ERETA for entries past 128
 *       on X550-class MACs),
 *     - the ten 32-bit hash key registers (RSSRK),
 *     - the hash field-enable bits in MRQC.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/*
	 * On NetBSD, always take the key from the RSS framework so every
	 * driver hashes consistently; elsewhere fall back to a random key
	 * when the RSS feature is not enabled.
	 */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a different queue-id multiplier in RETA */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 class: 512 entries, split across RETA + ERETA */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * Each RETA register holds four 8-bit entries:
		 * the low 8 bits are for hash value (n+0);
		 * the next 8 bits are for hash value (n+1), etc.
		 * Accumulate entries and flush once four are gathered.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (40 bytes = 10 words) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash configuration into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in any SR-IOV mode dependent MRQC bits */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
528
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round an mbuf buffer size up to the next SRRCTL BSIZEPKT granule */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames and pass MAC control */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in 1KB granules for SRRCTL.BSIZEPKT */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 8 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location for queue 0...7 are different between
		 * 82598 (TQSMR) and newer MACs (TQSM).
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Header split types (82599 and later only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */
673
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Program the TX descriptor rings (base/length/head/tail), disable
 *   head writeback per ring, then on 82599 and later enable the DMA
 *   TX engine and set MTQC with the arbiter temporarily disabled.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine (82599 and later) */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
746
747 /************************************************************************
748 * ixgbe_attach - Device initialization routine
749 *
750 * Called when the driver is being loaded.
751 * Identifies the type of hardware, allocates all resources
752 * and initializes the hardware.
753 *
754 * return 0 on success, positive on failure
755 ************************************************************************/
756 static void
757 ixgbe_attach(device_t parent, device_t dev, void *aux)
758 {
759 struct adapter *adapter;
760 struct ixgbe_hw *hw;
761 int error = -1;
762 u32 ctrl_ext;
763 u16 high, low, nvmreg;
764 pcireg_t id, subid;
765 ixgbe_vendor_info_t *ent;
766 struct pci_attach_args *pa = aux;
767 const char *str;
768 char buf[256];
769
770 INIT_DEBUGOUT("ixgbe_attach: begin");
771
772 /* Allocate, clear, and link in our adapter structure */
773 adapter = device_private(dev);
774 adapter->hw.back = adapter;
775 adapter->dev = dev;
776 hw = &adapter->hw;
777 adapter->osdep.pc = pa->pa_pc;
778 adapter->osdep.tag = pa->pa_tag;
779 if (pci_dma64_available(pa))
780 adapter->osdep.dmat = pa->pa_dmat64;
781 else
782 adapter->osdep.dmat = pa->pa_dmat;
783 adapter->osdep.attached = false;
784
785 ent = ixgbe_lookup(pa);
786
787 KASSERT(ent != NULL);
788
789 aprint_normal(": %s, Version - %s\n",
790 ixgbe_strings[ent->index], ixgbe_driver_version);
791
792 /* Core Lock Init*/
793 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
794
795 /* Set up the timer callout */
796 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
797
798 /* Determine hardware revision */
799 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
800 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
801
802 hw->vendor_id = PCI_VENDOR(id);
803 hw->device_id = PCI_PRODUCT(id);
804 hw->revision_id =
805 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
806 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
807 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
808
809 /*
810 * Make sure BUSMASTER is set
811 */
812 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
813
814 /* Do base PCI setup - map BAR0 */
815 if (ixgbe_allocate_pci_resources(adapter, pa)) {
816 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
817 error = ENXIO;
818 goto err_out;
819 }
820
821 /* let hardware know driver is loaded */
822 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
823 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
824 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
825
826 /*
827 * Initialize the shared code
828 */
829 if (ixgbe_init_shared_code(hw)) {
830 aprint_error_dev(dev, "Unable to initialize the shared code\n");
831 error = ENXIO;
832 goto err_out;
833 }
834
835 switch (hw->mac.type) {
836 case ixgbe_mac_82598EB:
837 str = "82598EB";
838 break;
839 case ixgbe_mac_82599EB:
840 str = "82599EB";
841 break;
842 case ixgbe_mac_X540:
843 str = "X540";
844 break;
845 case ixgbe_mac_X550:
846 str = "X550";
847 break;
848 case ixgbe_mac_X550EM_x:
849 str = "X550EM";
850 break;
851 case ixgbe_mac_X550EM_a:
852 str = "X550EM A";
853 break;
854 default:
855 str = "Unknown";
856 break;
857 }
858 aprint_normal_dev(dev, "device %s\n", str);
859
860 if (hw->mbx.ops.init_params)
861 hw->mbx.ops.init_params(hw);
862
863 hw->allow_unsupported_sfp = allow_unsupported_sfp;
864
865 /* Pick up the 82599 settings */
866 if (hw->mac.type != ixgbe_mac_82598EB) {
867 hw->phy.smart_speed = ixgbe_smart_speed;
868 adapter->num_segs = IXGBE_82599_SCATTER;
869 } else
870 adapter->num_segs = IXGBE_82598_SCATTER;
871
872 hw->mac.ops.set_lan_id(hw);
873 ixgbe_init_device_features(adapter);
874
875 if (ixgbe_configure_interrupts(adapter)) {
876 error = ENXIO;
877 goto err_out;
878 }
879
880 /* Allocate multicast array memory. */
881 adapter->mta = malloc(sizeof(*adapter->mta) *
882 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
883 if (adapter->mta == NULL) {
884 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
885 error = ENOMEM;
886 goto err_out;
887 }
888
889 /* Enable WoL (if supported) */
890 ixgbe_check_wol_support(adapter);
891
892 /* Verify adapter fan is still functional (if applicable) */
893 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
894 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
895 ixgbe_check_fan_failure(adapter, esdp, FALSE);
896 }
897
898 /* Ensure SW/FW semaphore is free */
899 ixgbe_init_swfw_semaphore(hw);
900
901 /* Enable EEE power saving */
902 if (adapter->feat_en & IXGBE_FEATURE_EEE)
903 hw->mac.ops.setup_eee(hw, TRUE);
904
905 /* Set an initial default flow control value */
906 hw->fc.requested_mode = ixgbe_flow_control;
907
908 /* Sysctls for limiting the amount of work done in the taskqueues */
909 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
910 "max number of rx packets to process",
911 &adapter->rx_process_limit, ixgbe_rx_process_limit);
912
913 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
914 "max number of tx packets to process",
915 &adapter->tx_process_limit, ixgbe_tx_process_limit);
916
917 /* Do descriptor calc and sanity checks */
918 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
919 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
920 aprint_error_dev(dev, "TXD config issue, using default!\n");
921 adapter->num_tx_desc = DEFAULT_TXD;
922 } else
923 adapter->num_tx_desc = ixgbe_txd;
924
925 /*
926 * With many RX rings it is easy to exceed the
927 * system mbuf allocation. Tuning nmbclusters
928 * can alleviate this.
929 */
930 if (nmbclusters > 0) {
931 int s;
932 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
933 if (s > nmbclusters) {
934 aprint_error_dev(dev, "RX Descriptors exceed "
935 "system mbuf max, using default instead!\n");
936 ixgbe_rxd = DEFAULT_RXD;
937 }
938 }
939
940 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
941 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
942 aprint_error_dev(dev, "RXD config issue, using default!\n");
943 adapter->num_rx_desc = DEFAULT_RXD;
944 } else
945 adapter->num_rx_desc = ixgbe_rxd;
946
947 /* Allocate our TX/RX Queues */
948 if (ixgbe_allocate_queues(adapter)) {
949 error = ENOMEM;
950 goto err_out;
951 }
952
953 hw->phy.reset_if_overtemp = TRUE;
954 error = ixgbe_reset_hw(hw);
955 hw->phy.reset_if_overtemp = FALSE;
956 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
957 /*
958 * No optics in this port, set up
959 * so the timer routine will probe
960 * for later insertion.
961 */
962 adapter->sfp_probe = TRUE;
963 error = IXGBE_SUCCESS;
964 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
965 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
966 error = EIO;
967 goto err_late;
968 } else if (error) {
969 aprint_error_dev(dev, "Hardware initialization failed\n");
970 error = EIO;
971 goto err_late;
972 }
973
974 /* Make sure we have a good EEPROM before we read from it */
975 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
976 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
977 error = EIO;
978 goto err_late;
979 }
980
981 aprint_normal("%s:", device_xname(dev));
982 /* NVM Image Version */
983 switch (hw->mac.type) {
984 case ixgbe_mac_X540:
985 case ixgbe_mac_X550EM_a:
986 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
987 if (nvmreg == 0xffff)
988 break;
989 high = (nvmreg >> 12) & 0x0f;
990 low = (nvmreg >> 4) & 0xff;
991 id = nvmreg & 0x0f;
992 aprint_normal(" NVM Image Version %u.", high);
993 if (hw->mac.type == ixgbe_mac_X540)
994 str = "%x";
995 else
996 str = "%02x";
997 aprint_normal(str, low);
998 aprint_normal(" ID 0x%x,", id);
999 break;
1000 case ixgbe_mac_X550EM_x:
1001 case ixgbe_mac_X550:
1002 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1003 if (nvmreg == 0xffff)
1004 break;
1005 high = (nvmreg >> 12) & 0x0f;
1006 low = nvmreg & 0xff;
1007 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1008 break;
1009 default:
1010 break;
1011 }
1012
1013 /* PHY firmware revision */
1014 switch (hw->mac.type) {
1015 case ixgbe_mac_X540:
1016 case ixgbe_mac_X550:
1017 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1018 if (nvmreg == 0xffff)
1019 break;
1020 high = (nvmreg >> 12) & 0x0f;
1021 low = (nvmreg >> 4) & 0xff;
1022 id = nvmreg & 0x000f;
1023 aprint_normal(" PHY FW Revision %u.", high);
1024 if (hw->mac.type == ixgbe_mac_X540)
1025 str = "%x";
1026 else
1027 str = "%02x";
1028 aprint_normal(str, low);
1029 aprint_normal(" ID 0x%x,", id);
1030 break;
1031 default:
1032 break;
1033 }
1034
1035 /* NVM Map version & OEM NVM Image version */
1036 switch (hw->mac.type) {
1037 case ixgbe_mac_X550:
1038 case ixgbe_mac_X550EM_x:
1039 case ixgbe_mac_X550EM_a:
1040 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1041 if (nvmreg != 0xffff) {
1042 high = (nvmreg >> 12) & 0x0f;
1043 low = nvmreg & 0x00ff;
1044 aprint_normal(" NVM Map version %u.%02x,", high, low);
1045 }
1046 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1047 if (nvmreg != 0xffff) {
1048 high = (nvmreg >> 12) & 0x0f;
1049 low = nvmreg & 0x00ff;
1050 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1051 low);
1052 }
1053 break;
1054 default:
1055 break;
1056 }
1057
1058 /* Print the ETrackID */
1059 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1061 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1062
1063 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1064 error = ixgbe_allocate_msix(adapter, pa);
1065 if (error) {
1066 /* Free allocated queue structures first */
1067 ixgbe_free_transmit_structures(adapter);
1068 ixgbe_free_receive_structures(adapter);
1069 free(adapter->queues, M_DEVBUF);
1070
1071 /* Fallback to legacy interrupt */
1072 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1073 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1074 adapter->feat_en |= IXGBE_FEATURE_MSI;
1075 adapter->num_queues = 1;
1076
1077 /* Allocate our TX/RX Queues again */
1078 if (ixgbe_allocate_queues(adapter)) {
1079 error = ENOMEM;
1080 goto err_out;
1081 }
1082 }
1083 }
1084 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1085 error = ixgbe_allocate_legacy(adapter, pa);
1086 if (error)
1087 goto err_late;
1088
1089 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1090 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1091 ixgbe_handle_link, adapter);
1092 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1093 ixgbe_handle_mod, adapter);
1094 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1095 ixgbe_handle_msf, adapter);
1096 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1097 ixgbe_handle_phy, adapter);
1098 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1099 adapter->fdir_si =
1100 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1101 ixgbe_reinit_fdir, adapter);
1102 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1103 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1104 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1105 && (adapter->fdir_si == NULL))) {
1106 aprint_error_dev(dev,
1107 "could not establish software interrupts ()\n");
1108 goto err_out;
1109 }
1110
1111 error = ixgbe_start_hw(hw);
1112 switch (error) {
1113 case IXGBE_ERR_EEPROM_VERSION:
1114 aprint_error_dev(dev, "This device is a pre-production adapter/"
1115 "LOM. Please be aware there may be issues associated "
1116 "with your hardware.\nIf you are experiencing problems "
1117 "please contact your Intel or hardware representative "
1118 "who provided you with this hardware.\n");
1119 break;
1120 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1121 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1122 error = EIO;
1123 goto err_late;
1124 case IXGBE_ERR_SFP_NOT_PRESENT:
1125 aprint_error_dev(dev, "No SFP+ Module found\n");
1126 /* falls thru */
1127 default:
1128 break;
1129 }
1130
1131 /* Setup OS specific network interface */
1132 if (ixgbe_setup_interface(dev, adapter) != 0)
1133 goto err_late;
1134
1135 /*
1136 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1137 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1138 */
1139 if (hw->phy.media_type == ixgbe_media_type_copper) {
1140 uint16_t id1, id2;
1141 int oui, model, rev;
1142 const char *descr;
1143
1144 id1 = hw->phy.id >> 16;
1145 id2 = hw->phy.id & 0xffff;
1146 oui = MII_OUI(id1, id2);
1147 model = MII_MODEL(id2);
1148 rev = MII_REV(id2);
1149 if ((descr = mii_get_descr(oui, model)) != NULL)
1150 aprint_normal_dev(dev,
1151 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1152 descr, oui, model, rev);
1153 else
1154 aprint_normal_dev(dev,
1155 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1156 oui, model, rev);
1157 }
1158
1159 /* Enable the optics for 82599 SFP+ fiber */
1160 ixgbe_enable_tx_laser(hw);
1161
1162 /* Enable power to the phy. */
1163 ixgbe_set_phy_power(hw, TRUE);
1164
1165 /* Initialize statistics */
1166 ixgbe_update_stats_counters(adapter);
1167
1168 /* Check PCIE slot type/speed/width */
1169 ixgbe_get_slot_info(adapter);
1170
1171 /*
1172 * Do time init and sysctl init here, but
1173 * only on the first port of a bypass adapter.
1174 */
1175 ixgbe_bypass_init(adapter);
1176
1177 /* Set an initial dmac value */
1178 adapter->dmac = 0;
1179 /* Set initial advertised speeds (if applicable) */
1180 adapter->advertise = ixgbe_get_advertise(adapter);
1181
1182 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1183 ixgbe_define_iov_schemas(dev, &error);
1184
1185 /* Add sysctls */
1186 ixgbe_add_device_sysctls(adapter);
1187 ixgbe_add_hw_stats(adapter);
1188
1189 /* For Netmap */
1190 adapter->init_locked = ixgbe_init_locked;
1191 adapter->stop_locked = ixgbe_stop;
1192
1193 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1194 ixgbe_netmap_attach(adapter);
1195
1196 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1197 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1198 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1199 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1200
1201 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1202 pmf_class_network_register(dev, adapter->ifp);
1203 else
1204 aprint_error_dev(dev, "couldn't establish power handler\n");
1205
1206 INIT_DEBUGOUT("ixgbe_attach: end");
1207 adapter->osdep.attached = true;
1208
1209 return;
1210
1211 err_late:
1212 ixgbe_free_transmit_structures(adapter);
1213 ixgbe_free_receive_structures(adapter);
1214 free(adapter->queues, M_DEVBUF);
1215 err_out:
1216 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1217 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1219 ixgbe_free_softint(adapter);
1220 ixgbe_free_pci_resources(adapter);
1221 if (adapter->mta != NULL)
1222 free(adapter->mta, M_DEVBUF);
1223 IXGBE_CORE_LOCK_DESTROY(adapter);
1224
1225 return;
1226 } /* ixgbe_attach */
1227
1228 /************************************************************************
1229 * ixgbe_check_wol_support
1230 *
1231 * Checks whether the adapter's ports are capable of
1232 * Wake On LAN by reading the adapter's NVM.
1233 *
1234 * Sets each port's hw->wol_enabled value depending
1235 * on the value read here.
1236 ************************************************************************/
1237 static void
1238 ixgbe_check_wol_support(struct adapter *adapter)
1239 {
1240 struct ixgbe_hw *hw = &adapter->hw;
1241 u16 dev_caps = 0;
1242
1243 /* Find out WoL support for port */
1244 adapter->wol_support = hw->wol_enabled = 0;
1245 ixgbe_get_device_caps(hw, &dev_caps);
1246 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1247 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1248 hw->bus.func == 0))
1249 adapter->wol_support = hw->wol_enabled = 1;
1250
1251 /* Save initial wake up filter configuration */
1252 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1253
1254 return;
1255 } /* ixgbe_check_wol_support */
1256
1257 /************************************************************************
1258 * ixgbe_setup_interface
1259 *
1260 * Setup networking device structure and register an interface.
1261 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet lives inside the ethercom; wire it up to this softc. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Use multiqueue transmit unless the legacy-TX feature is enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	/* if_start is set unconditionally as the fallback entry point. */
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Order matters: if_initialize, attach, then if_register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
	    | IFCAP_TXCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6
	    | IFCAP_LRO;
	/* All interface capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1364
1365 /************************************************************************
1366 * ixgbe_add_media_types
1367 ************************************************************************/
1368 static void
1369 ixgbe_add_media_types(struct adapter *adapter)
1370 {
1371 struct ixgbe_hw *hw = &adapter->hw;
1372 device_t dev = adapter->dev;
1373 u64 layer;
1374
1375 layer = adapter->phy_layer;
1376
1377 #define ADD(mm, dd) \
1378 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1379
1380 /* Media types with matching NetBSD media defines */
1381 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1382 ADD(IFM_10G_T | IFM_FDX, 0);
1383 }
1384 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1385 ADD(IFM_1000_T | IFM_FDX, 0);
1386 }
1387 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1388 ADD(IFM_100_TX | IFM_FDX, 0);
1389 }
1390 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1391 ADD(IFM_10_T | IFM_FDX, 0);
1392 }
1393
1394 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1395 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1396 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1397 }
1398
1399 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1400 ADD(IFM_10G_LR | IFM_FDX, 0);
1401 if (hw->phy.multispeed_fiber) {
1402 ADD(IFM_1000_LX | IFM_FDX, 0);
1403 }
1404 }
1405 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1406 ADD(IFM_10G_SR | IFM_FDX, 0);
1407 if (hw->phy.multispeed_fiber) {
1408 ADD(IFM_1000_SX | IFM_FDX, 0);
1409 }
1410 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1411 ADD(IFM_1000_SX | IFM_FDX, 0);
1412 }
1413 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1414 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1415 }
1416
1417 #ifdef IFM_ETH_XTYPE
1418 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1419 ADD(IFM_10G_KR | IFM_FDX, 0);
1420 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1422 ADD(AIFM_10G_KX4 | IFM_FDX, 0);
1423 }
1424 #else
1425 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1426 device_printf(dev, "Media supported: 10GbaseKR\n");
1427 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1428 ADD(IFM_10G_SR | IFM_FDX, 0);
1429 }
1430 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1431 device_printf(dev, "Media supported: 10GbaseKX4\n");
1432 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1433 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1434 }
1435 #endif
1436 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1437 ADD(IFM_1000_KX | IFM_FDX, 0);
1438 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1440 ADD(IFM_2500_KX | IFM_FDX, 0);
1441 }
1442 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1443 ADD(IFM_2500_T | IFM_FDX, 0);
1444 }
1445 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1446 ADD(IFM_5000_T | IFM_FDX, 0);
1447 }
1448 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1449 device_printf(dev, "Media supported: 1000baseBX\n");
1450 /* XXX no ifmedia_set? */
1451
1452 ADD(IFM_AUTO, 0);
1453
1454 #undef ADD
1455 } /* ixgbe_add_media_types */
1456
1457 /************************************************************************
1458 * ixgbe_is_sfp
1459 ************************************************************************/
1460 static inline bool
1461 ixgbe_is_sfp(struct ixgbe_hw *hw)
1462 {
1463 switch (hw->mac.type) {
1464 case ixgbe_mac_82598EB:
1465 if (hw->phy.type == ixgbe_phy_nl)
1466 return TRUE;
1467 return FALSE;
1468 case ixgbe_mac_82599EB:
1469 switch (hw->mac.ops.get_media_type(hw)) {
1470 case ixgbe_media_type_fiber:
1471 case ixgbe_media_type_fiber_qsfp:
1472 return TRUE;
1473 default:
1474 return FALSE;
1475 }
1476 case ixgbe_mac_X550EM_x:
1477 case ixgbe_mac_X550EM_a:
1478 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1479 return TRUE;
1480 return FALSE;
1481 default:
1482 return FALSE;
1483 }
1484 } /* ixgbe_is_sfp */
1485
1486 /************************************************************************
1487 * ixgbe_config_link
1488 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate = false;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * SFP ports: hand the work off to the module/multispeed
		 * softints established at attach time.
		 */
		if (hw->phy.multispeed_fiber) {
			hw->mac.ops.setup_sfp(hw);
			ixgbe_enable_tx_laser(hw);
			/* stay on this CPU across softint_schedule() */
			kpreempt_disable();
			softint_schedule(adapter->msf_si);
			kpreempt_enable();
		} else {
			kpreempt_disable();
			softint_schedule(adapter->mod_si);
			kpreempt_enable();
		}
	} else {
		/*
		 * Non-SFP media: read the current link state, then let the
		 * MAC bring the link up with the advertised speeds.
		 */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to the PHY's capabilities. */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
out:

	return;
} /* ixgbe_config_link */
1530
1531 /************************************************************************
1532 * ixgbe_update_stats_counters - Update board statistics counters.
1533 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;
	uint64_t crcerrs, rlec;

	/* Error counters */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC is only read on X550. */
	if (hw->mac.type == ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* Fold the per-register queue counters onto the active queues. */
	for (int i = 0; i < __arraycount(stats->qprc); i++) {
		int j = i % adapter->num_queues;
		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	for (int i = 0; i < __arraycount(stats->mpc); i++) {
		uint32_t mp;
		int j = i % adapter->num_queues;

		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[j].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[j].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == TRUE)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx is initialized to 0 and never updated
	 * in this function, so this subtraction is currently a no-op;
	 * verify whether it should subtract the MPC sum instead.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	/* Link-level flow control pause frames sent. */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters are split across low/high registers. */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		/* Exclude the pause frames counted in 'total' above. */
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* RX packet-size histogram counters. */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* TX totals, excluding the pause frames counted above. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
1670
1671 /************************************************************************
1672 * ixgbe_add_hw_stats
1673 *
1674 * Add sysctl variables, one per statistic, to the system.
1675 ************************************************************************/
1676 static void
1677 ixgbe_add_hw_stats(struct adapter *adapter)
1678 {
1679 device_t dev = adapter->dev;
1680 const struct sysctlnode *rnode, *cnode;
1681 struct sysctllog **log = &adapter->sysctllog;
1682 struct tx_ring *txr = adapter->tx_rings;
1683 struct rx_ring *rxr = adapter->rx_rings;
1684 struct ixgbe_hw *hw = &adapter->hw;
1685 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1686 const char *xname = device_xname(dev);
1687
1688 /* Driver Statistics */
1689 evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
1690 NULL, xname, "Handled queue in softint");
1691 evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
1692 NULL, xname, "Requeued in softint");
1693 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1694 NULL, xname, "Driver tx dma soft fail EFBIG");
1695 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1696 NULL, xname, "m_defrag() failed");
1697 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1698 NULL, xname, "Driver tx dma hard fail EFBIG");
1699 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1700 NULL, xname, "Driver tx dma hard fail EINVAL");
1701 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1702 NULL, xname, "Driver tx dma hard fail other");
1703 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1704 NULL, xname, "Driver tx dma soft fail EAGAIN");
1705 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1706 NULL, xname, "Driver tx dma soft fail ENOMEM");
1707 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1708 NULL, xname, "Watchdog timeouts");
1709 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1710 NULL, xname, "TSO errors");
1711 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1712 NULL, xname, "Link MSI-X IRQ Handled");
1713
1714 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1715 snprintf(adapter->queues[i].evnamebuf,
1716 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1717 xname, i);
1718 snprintf(adapter->queues[i].namebuf,
1719 sizeof(adapter->queues[i].namebuf), "q%d", i);
1720
1721 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1722 aprint_error_dev(dev, "could not create sysctl root\n");
1723 break;
1724 }
1725
1726 if (sysctl_createv(log, 0, &rnode, &rnode,
1727 0, CTLTYPE_NODE,
1728 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1729 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1730 break;
1731
1732 if (sysctl_createv(log, 0, &rnode, &cnode,
1733 CTLFLAG_READWRITE, CTLTYPE_INT,
1734 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1735 ixgbe_sysctl_interrupt_rate_handler, 0,
1736 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1737 break;
1738
1739 #if 0 /* XXX msaitoh */
1740 if (sysctl_createv(log, 0, &rnode, &cnode,
1741 CTLFLAG_READONLY, CTLTYPE_QUAD,
1742 "irqs", SYSCTL_DESCR("irqs on this queue"),
1743 NULL, 0, &(adapter->queues[i].irqs),
1744 0, CTL_CREATE, CTL_EOL) != 0)
1745 break;
1746 #endif
1747
1748 if (sysctl_createv(log, 0, &rnode, &cnode,
1749 CTLFLAG_READONLY, CTLTYPE_INT,
1750 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1751 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1752 0, CTL_CREATE, CTL_EOL) != 0)
1753 break;
1754
1755 if (sysctl_createv(log, 0, &rnode, &cnode,
1756 CTLFLAG_READONLY, CTLTYPE_INT,
1757 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1758 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1759 0, CTL_CREATE, CTL_EOL) != 0)
1760 break;
1761
1762 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1763 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1764 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1765 NULL, adapter->queues[i].evnamebuf, "TSO");
1766 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1767 NULL, adapter->queues[i].evnamebuf,
1768 "Queue No Descriptor Available");
1769 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1770 NULL, adapter->queues[i].evnamebuf,
1771 "Queue Packets Transmitted");
1772 #ifndef IXGBE_LEGACY_TX
1773 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1774 NULL, adapter->queues[i].evnamebuf,
1775 "Packets dropped in pcq");
1776 #endif
1777
1778 #ifdef LRO
1779 struct lro_ctrl *lro = &rxr->lro;
1780 #endif /* LRO */
1781
1782 if (sysctl_createv(log, 0, &rnode, &cnode,
1783 CTLFLAG_READONLY,
1784 CTLTYPE_INT,
1785 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1786 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1787 CTL_CREATE, CTL_EOL) != 0)
1788 break;
1789
1790 if (sysctl_createv(log, 0, &rnode, &cnode,
1791 CTLFLAG_READONLY,
1792 CTLTYPE_INT,
1793 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1794 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1795 CTL_CREATE, CTL_EOL) != 0)
1796 break;
1797
1798 if (i < __arraycount(stats->mpc)) {
1799 evcnt_attach_dynamic(&stats->mpc[i],
1800 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1801 "RX Missed Packet Count");
1802 if (hw->mac.type == ixgbe_mac_82598EB)
1803 evcnt_attach_dynamic(&stats->rnbc[i],
1804 EVCNT_TYPE_MISC, NULL,
1805 adapter->queues[i].evnamebuf,
1806 "Receive No Buffers");
1807 }
1808 if (i < __arraycount(stats->pxontxc)) {
1809 evcnt_attach_dynamic(&stats->pxontxc[i],
1810 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1811 "pxontxc");
1812 evcnt_attach_dynamic(&stats->pxonrxc[i],
1813 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1814 "pxonrxc");
1815 evcnt_attach_dynamic(&stats->pxofftxc[i],
1816 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1817 "pxofftxc");
1818 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1819 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1820 "pxoffrxc");
1821 evcnt_attach_dynamic(&stats->pxon2offc[i],
1822 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1823 "pxon2offc");
1824 }
1825 if (i < __arraycount(stats->qprc)) {
1826 evcnt_attach_dynamic(&stats->qprc[i],
1827 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1828 "qprc");
1829 evcnt_attach_dynamic(&stats->qptc[i],
1830 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1831 "qptc");
1832 evcnt_attach_dynamic(&stats->qbrc[i],
1833 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1834 "qbrc");
1835 evcnt_attach_dynamic(&stats->qbtc[i],
1836 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1837 "qbtc");
1838 evcnt_attach_dynamic(&stats->qprdc[i],
1839 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1840 "qprdc");
1841 }
1842
1843 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1844 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1845 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1846 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1847 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1848 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1849 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1850 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1851 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1852 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1853 #ifdef LRO
1854 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1855 CTLFLAG_RD, &lro->lro_queued, 0,
1856 "LRO Queued");
1857 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1858 CTLFLAG_RD, &lro->lro_flushed, 0,
1859 "LRO Flushed");
1860 #endif /* LRO */
1861 }
1862
1863 /* MAC stats get their own sub node */
1864
1865 snprintf(stats->namebuf,
1866 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1867
1868 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1869 stats->namebuf, "rx csum offload - IP");
1870 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1871 stats->namebuf, "rx csum offload - L4");
1872 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1873 stats->namebuf, "rx csum offload - IP bad");
1874 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1875 stats->namebuf, "rx csum offload - L4 bad");
1876 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1877 stats->namebuf, "Interrupt conditions zero");
1878 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1879 stats->namebuf, "Legacy interrupts");
1880
1881 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1882 stats->namebuf, "CRC Errors");
1883 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1884 stats->namebuf, "Illegal Byte Errors");
1885 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1886 stats->namebuf, "Byte Errors");
1887 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1888 stats->namebuf, "MAC Short Packets Discarded");
1889 if (hw->mac.type >= ixgbe_mac_X550)
1890 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1891 stats->namebuf, "Bad SFD");
1892 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1893 stats->namebuf, "Total Packets Missed");
1894 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1895 stats->namebuf, "MAC Local Faults");
1896 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1897 stats->namebuf, "MAC Remote Faults");
1898 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1899 stats->namebuf, "Receive Length Errors");
1900 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1901 stats->namebuf, "Link XON Transmitted");
1902 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1903 stats->namebuf, "Link XON Received");
1904 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1905 stats->namebuf, "Link XOFF Transmitted");
1906 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1907 stats->namebuf, "Link XOFF Received");
1908
1909 /* Packet Reception Stats */
1910 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1911 stats->namebuf, "Total Octets Received");
1912 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1913 stats->namebuf, "Good Octets Received");
1914 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1915 stats->namebuf, "Total Packets Received");
1916 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1917 stats->namebuf, "Good Packets Received");
1918 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1919 stats->namebuf, "Multicast Packets Received");
1920 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1921 stats->namebuf, "Broadcast Packets Received");
1922 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1923 stats->namebuf, "64 byte frames received ");
1924 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1925 stats->namebuf, "65-127 byte frames received");
1926 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1927 stats->namebuf, "128-255 byte frames received");
1928 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1929 stats->namebuf, "256-511 byte frames received");
1930 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1931 stats->namebuf, "512-1023 byte frames received");
1932 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
1933 stats->namebuf, "1023-1522 byte frames received");
1934 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
1935 stats->namebuf, "Receive Undersized");
1936 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
1937 stats->namebuf, "Fragmented Packets Received ");
1938 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
1939 stats->namebuf, "Oversized Packets Received");
1940 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "Received Jabber");
1942 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "Management Packets Received");
1944 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "Management Packets Dropped");
1946 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "Checksum Errors");
1948
1949 /* Packet Transmission Stats */
1950 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Good Octets Transmitted");
1952 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Total Packets Transmitted");
1954 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Good Packets Transmitted");
1956 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "Broadcast Packets Transmitted");
1958 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "Multicast Packets Transmitted");
1960 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
1961 stats->namebuf, "Management Packets Transmitted");
1962 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "64 byte frames transmitted ");
1964 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "65-127 byte frames transmitted");
1966 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "128-255 byte frames transmitted");
1968 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "256-511 byte frames transmitted");
1970 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "512-1023 byte frames transmitted");
1972 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "1024-1522 byte frames transmitted");
1974 } /* ixgbe_add_hw_stats */
1975
1976 static void
1977 ixgbe_clear_evcnt(struct adapter *adapter)
1978 {
1979 struct tx_ring *txr = adapter->tx_rings;
1980 struct rx_ring *rxr = adapter->rx_rings;
1981 struct ixgbe_hw *hw = &adapter->hw;
1982 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1983
1984 adapter->handleq.ev_count = 0;
1985 adapter->req.ev_count = 0;
1986 adapter->efbig_tx_dma_setup.ev_count = 0;
1987 adapter->mbuf_defrag_failed.ev_count = 0;
1988 adapter->efbig2_tx_dma_setup.ev_count = 0;
1989 adapter->einval_tx_dma_setup.ev_count = 0;
1990 adapter->other_tx_dma_setup.ev_count = 0;
1991 adapter->eagain_tx_dma_setup.ev_count = 0;
1992 adapter->enomem_tx_dma_setup.ev_count = 0;
1993 adapter->watchdog_events.ev_count = 0;
1994 adapter->tso_err.ev_count = 0;
1995 adapter->link_irq.ev_count = 0;
1996
1997 txr = adapter->tx_rings;
1998 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1999 adapter->queues[i].irqs.ev_count = 0;
2000 txr->no_desc_avail.ev_count = 0;
2001 txr->total_packets.ev_count = 0;
2002 txr->tso_tx.ev_count = 0;
2003 #ifndef IXGBE_LEGACY_TX
2004 txr->pcq_drops.ev_count = 0;
2005 #endif
2006
2007 if (i < __arraycount(stats->mpc)) {
2008 stats->mpc[i].ev_count = 0;
2009 if (hw->mac.type == ixgbe_mac_82598EB)
2010 stats->rnbc[i].ev_count = 0;
2011 }
2012 if (i < __arraycount(stats->pxontxc)) {
2013 stats->pxontxc[i].ev_count = 0;
2014 stats->pxonrxc[i].ev_count = 0;
2015 stats->pxofftxc[i].ev_count = 0;
2016 stats->pxoffrxc[i].ev_count = 0;
2017 stats->pxon2offc[i].ev_count = 0;
2018 }
2019 if (i < __arraycount(stats->qprc)) {
2020 stats->qprc[i].ev_count = 0;
2021 stats->qptc[i].ev_count = 0;
2022 stats->qbrc[i].ev_count = 0;
2023 stats->qbtc[i].ev_count = 0;
2024 stats->qprdc[i].ev_count = 0;
2025 }
2026
2027 rxr->rx_packets.ev_count = 0;
2028 rxr->rx_bytes.ev_count = 0;
2029 rxr->rx_copies.ev_count = 0;
2030 rxr->no_jmbuf.ev_count = 0;
2031 rxr->rx_discarded.ev_count = 0;
2032 }
2033 stats->ipcs.ev_count = 0;
2034 stats->l4cs.ev_count = 0;
2035 stats->ipcs_bad.ev_count = 0;
2036 stats->l4cs_bad.ev_count = 0;
2037 stats->intzero.ev_count = 0;
2038 stats->legint.ev_count = 0;
2039 stats->crcerrs.ev_count = 0;
2040 stats->illerrc.ev_count = 0;
2041 stats->errbc.ev_count = 0;
2042 stats->mspdc.ev_count = 0;
2043 stats->mbsdc.ev_count = 0;
2044 stats->mpctotal.ev_count = 0;
2045 stats->mlfc.ev_count = 0;
2046 stats->mrfc.ev_count = 0;
2047 stats->rlec.ev_count = 0;
2048 stats->lxontxc.ev_count = 0;
2049 stats->lxonrxc.ev_count = 0;
2050 stats->lxofftxc.ev_count = 0;
2051 stats->lxoffrxc.ev_count = 0;
2052
2053 /* Packet Reception Stats */
2054 stats->tor.ev_count = 0;
2055 stats->gorc.ev_count = 0;
2056 stats->tpr.ev_count = 0;
2057 stats->gprc.ev_count = 0;
2058 stats->mprc.ev_count = 0;
2059 stats->bprc.ev_count = 0;
2060 stats->prc64.ev_count = 0;
2061 stats->prc127.ev_count = 0;
2062 stats->prc255.ev_count = 0;
2063 stats->prc511.ev_count = 0;
2064 stats->prc1023.ev_count = 0;
2065 stats->prc1522.ev_count = 0;
2066 stats->ruc.ev_count = 0;
2067 stats->rfc.ev_count = 0;
2068 stats->roc.ev_count = 0;
2069 stats->rjc.ev_count = 0;
2070 stats->mngprc.ev_count = 0;
2071 stats->mngpdc.ev_count = 0;
2072 stats->xec.ev_count = 0;
2073
2074 /* Packet Transmission Stats */
2075 stats->gotc.ev_count = 0;
2076 stats->tpt.ev_count = 0;
2077 stats->gptc.ev_count = 0;
2078 stats->bptc.ev_count = 0;
2079 stats->mptc.ev_count = 0;
2080 stats->mngptc.ev_count = 0;
2081 stats->ptc64.ev_count = 0;
2082 stats->ptc127.ev_count = 0;
2083 stats->ptc255.ev_count = 0;
2084 stats->ptc511.ev_count = 0;
2085 stats->ptc1023.ev_count = 0;
2086 stats->ptc1522.ev_count = 0;
2087 }
2088
2089 /************************************************************************
2090 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2091 *
2092 * Retrieves the TDH value from the hardware
2093 ************************************************************************/
2094 static int
2095 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2096 {
2097 struct sysctlnode node = *rnode;
2098 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2099 uint32_t val;
2100
2101 if (!txr)
2102 return (0);
2103
2104 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
2105 node.sysctl_data = &val;
2106 return sysctl_lookup(SYSCTLFN_CALL(&node));
2107 } /* ixgbe_sysctl_tdh_handler */
2108
2109 /************************************************************************
2110 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2111 *
2112 * Retrieves the TDT value from the hardware
2113 ************************************************************************/
2114 static int
2115 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2116 {
2117 struct sysctlnode node = *rnode;
2118 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2119 uint32_t val;
2120
2121 if (!txr)
2122 return (0);
2123
2124 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
2125 node.sysctl_data = &val;
2126 return sysctl_lookup(SYSCTLFN_CALL(&node));
2127 } /* ixgbe_sysctl_tdt_handler */
2128
2129 /************************************************************************
2130 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2131 *
2132 * Retrieves the RDH value from the hardware
2133 ************************************************************************/
2134 static int
2135 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2136 {
2137 struct sysctlnode node = *rnode;
2138 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2139 uint32_t val;
2140
2141 if (!rxr)
2142 return (0);
2143
2144 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
2145 node.sysctl_data = &val;
2146 return sysctl_lookup(SYSCTLFN_CALL(&node));
2147 } /* ixgbe_sysctl_rdh_handler */
2148
2149 /************************************************************************
2150 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2151 *
2152 * Retrieves the RDT value from the hardware
2153 ************************************************************************/
2154 static int
2155 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2156 {
2157 struct sysctlnode node = *rnode;
2158 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2159 uint32_t val;
2160
2161 if (!rxr)
2162 return (0);
2163
2164 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
2165 node.sysctl_data = &val;
2166 return sysctl_lookup(SYSCTLFN_CALL(&node));
2167 } /* ixgbe_sysctl_rdt_handler */
2168
#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixgbe_register_vlan
 *
 * Run via vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This
 * just creates the entry in the soft version of the
 * VFTA, init will repopulate the real table.
 *
 * NOTE: compiled out (#if 0) until the NetBSD vlan(4) framework
 * grows the config/unconfig event hooks this code expects.
 ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16            index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* vtag bits 11:5 select the 32-bit VFTA word, bits 4:0 the bit */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated shadow table into the hardware */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */

/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
 ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16            index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit decomposition as ixgbe_register_vlan() */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
#endif
2224
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program the hardware VLAN support to match the ethercom
 *   capabilities: per-queue (or global, on 82598) tag stripping and,
 *   if enabled, the VLAN filter table (VFTA), which is repopulated
 *   from the software shadow copy after a reset.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	int		i;
	u32		ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Setup the queues for vlans */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
		for (i = 0; i < adapter->num_queues; i++) {
			rxr = &adapter->rx_rings[i];
			/* On 82599 the VLAN enable is per/queue in RXDCTL */
			if (hw->mac.type != ixgbe_mac_82598EB) {
				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
				ctrl |= IXGBE_RXDCTL_VME;
				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
			}
			/* Tell the RX path the hardware strips the tag */
			rxr->vtag_strip = TRUE;
		}
	}

	/* Everything below only applies when HW filtering is enabled */
	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/*
	 * Enable the Filter Table if enabled.  NOTE(review): this test
	 * is always true here -- we returned above when HWFILTER was
	 * clear -- so the condition is redundant but harmless.
	 */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* 82598 has a single global VLAN-strip enable in VLNCTRL */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
2279
2280 /************************************************************************
2281 * ixgbe_get_slot_info
2282 *
2283 * Get the width and transaction speed of
2284 * the slot this adapter is plugged into.
2285 ************************************************************************/
2286 static void
2287 ixgbe_get_slot_info(struct adapter *adapter)
2288 {
2289 device_t dev = adapter->dev;
2290 struct ixgbe_hw *hw = &adapter->hw;
2291 u32 offset;
2292 // struct ixgbe_mac_info *mac = &hw->mac;
2293 u16 link;
2294 int bus_info_valid = TRUE;
2295
2296 /* Some devices are behind an internal bridge */
2297 switch (hw->device_id) {
2298 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2299 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2300 goto get_parent_info;
2301 default:
2302 break;
2303 }
2304
2305 ixgbe_get_bus_info(hw);
2306
2307 /*
2308 * Some devices don't use PCI-E, but there is no need
2309 * to display "Unknown" for bus speed and width.
2310 */
2311 switch (hw->mac.type) {
2312 case ixgbe_mac_X550EM_x:
2313 case ixgbe_mac_X550EM_a:
2314 return;
2315 default:
2316 goto display;
2317 }
2318
2319 get_parent_info:
2320 /*
2321 * For the Quad port adapter we need to parse back
2322 * up the PCI tree to find the speed of the expansion
2323 * slot into which this adapter is plugged. A bit more work.
2324 */
2325 dev = device_parent(device_parent(dev));
2326 #if 0
2327 #ifdef IXGBE_DEBUG
2328 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2329 pci_get_slot(dev), pci_get_function(dev));
2330 #endif
2331 dev = device_parent(device_parent(dev));
2332 #ifdef IXGBE_DEBUG
2333 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2334 pci_get_slot(dev), pci_get_function(dev));
2335 #endif
2336 #endif
2337 /* Now get the PCI Express Capabilities offset */
2338 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2339 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2340 /*
2341 * Hmm...can't get PCI-Express capabilities.
2342 * Falling back to default method.
2343 */
2344 bus_info_valid = FALSE;
2345 ixgbe_get_bus_info(hw);
2346 goto display;
2347 }
2348 /* ...and read the Link Status Register */
2349 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2350 offset + PCIE_LCSR) >> 16;
2351 ixgbe_set_pci_config_data_generic(hw, link);
2352
2353 display:
2354 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2355 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2356 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2357 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2358 "Unknown"),
2359 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2360 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2361 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2362 "Unknown"));
2363
2364 if (bus_info_valid) {
2365 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2366 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2367 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2368 device_printf(dev, "PCI-Express bandwidth available"
2369 " for this card\n is not sufficient for"
2370 " optimal performance.\n");
2371 device_printf(dev, "For optimal performance a x8 "
2372 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2373 }
2374 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2375 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2376 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2377 device_printf(dev, "PCI-Express bandwidth available"
2378 " for this card\n is not sufficient for"
2379 " optimal performance.\n");
2380 device_printf(dev, "For optimal performance a x8 "
2381 "PCIE Gen3 slot is required.\n");
2382 }
2383 } else
2384 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2385
2386 return;
2387 } /* ixgbe_get_slot_info */
2388
2389 /************************************************************************
2390 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2391 ************************************************************************/
2392 static inline void
2393 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2394 {
2395 struct ixgbe_hw *hw = &adapter->hw;
2396 struct ix_queue *que = &adapter->queues[vector];
2397 u64 queue = (u64)(1ULL << vector);
2398 u32 mask;
2399
2400 mutex_enter(&que->im_mtx);
2401 if (que->im_nest > 0 && --que->im_nest > 0)
2402 goto out;
2403
2404 if (hw->mac.type == ixgbe_mac_82598EB) {
2405 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2406 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2407 } else {
2408 mask = (queue & 0xFFFFFFFF);
2409 if (mask)
2410 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2411 mask = (queue >> 32);
2412 if (mask)
2413 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2414 }
2415 out:
2416 mutex_exit(&que->im_mtx);
2417 } /* ixgbe_enable_queue */
2418
2419 /************************************************************************
2420 * ixgbe_disable_queue
2421 ************************************************************************/
2422 static inline void
2423 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2424 {
2425 struct ixgbe_hw *hw = &adapter->hw;
2426 struct ix_queue *que = &adapter->queues[vector];
2427 u64 queue = (u64)(1ULL << vector);
2428 u32 mask;
2429
2430 mutex_enter(&que->im_mtx);
2431 if (que->im_nest++ > 0)
2432 goto out;
2433
2434 if (hw->mac.type == ixgbe_mac_82598EB) {
2435 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2436 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2437 } else {
2438 mask = (queue & 0xFFFFFFFF);
2439 if (mask)
2440 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2441 mask = (queue >> 32);
2442 if (mask)
2443 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2444 }
2445 out:
2446 mutex_exit(&que->im_mtx);
2447 } /* ixgbe_disable_queue */
2448
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Hard interrupt handler for one RX/TX queue pair.  Masks the
 *   queue's interrupt, services TX completions, optionally updates
 *   the adaptive interrupt moderation (AIM) setting, then either
 *   defers RX processing to a softint/workqueue or re-enables the
 *   interrupt.  Returns 1 (interrupt claimed).
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector; re-enabled below or by the deferred handler */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average packet size over the interval, in bytes */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more) {
		/* Defer RX processing; the vector stays masked meanwhile */
		if (adapter->txrx_use_workqueue) {
			/*
			 * adapter->que_wq is bound to each CPU instead of
			 * each NIC queue to reduce workqueue kthread. As we
			 * should consider about interrupt affinity in this
			 * function, the workqueue kthread must be WQ_PERCPU.
			 * If create WQ_PERCPU workqueue kthread for each NIC
			 * queue, that number of created workqueue kthread is
			 * (number of used NIC queue) * (number of CPUs) =
			 * (number of CPUs) ^ 2 most often.
			 *
			 * The same NIC queue's interrupts are avoided by
			 * masking the queue's interrupt. And different
			 * NIC queue's interrupts use different struct work
			 * (que->wq_cookie). So, "enqueued flag" to avoid
			 * twice workqueue_enqueue() is not required .
			 */
			workqueue_enqueue(adapter->que_wq, &que->wq_cookie,
			    curcpu());
		} else {
			softint_schedule(que->que_si);
		}
	} else
		/* Nothing pending: unmask the vector again */
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
2565
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 *
 *   Maps the adapter's physical-layer capability bits and current
 *   link speed onto ifmedia types, and reports the active flow
 *   control direction.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T/TX) media types, by negotiated speed */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach / long-reach-multimode / short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2721
2722 /************************************************************************
2723 * ixgbe_media_change - Media Ioctl callback
2724 *
2725 * Called when the user changes speed/duplex using
2726 * media/mediopt option with ifconfig.
2727 ************************************************************************/
2728 static int
2729 ixgbe_media_change(struct ifnet *ifp)
2730 {
2731 struct adapter *adapter = ifp->if_softc;
2732 struct ifmedia *ifm = &adapter->media;
2733 struct ixgbe_hw *hw = &adapter->hw;
2734 ixgbe_link_speed speed = 0;
2735 ixgbe_link_speed link_caps = 0;
2736 bool negotiate = false;
2737 s32 err = IXGBE_NOT_IMPLEMENTED;
2738
2739 INIT_DEBUGOUT("ixgbe_media_change: begin");
2740
2741 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2742 return (EINVAL);
2743
2744 if (hw->phy.media_type == ixgbe_media_type_backplane)
2745 return (ENODEV);
2746
2747 /*
2748 * We don't actually need to check against the supported
2749 * media types of the adapter; ifmedia will take care of
2750 * that for us.
2751 */
2752 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2753 case IFM_AUTO:
2754 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2755 &negotiate);
2756 if (err != IXGBE_SUCCESS) {
2757 device_printf(adapter->dev, "Unable to determine "
2758 "supported advertise speeds\n");
2759 return (ENODEV);
2760 }
2761 speed |= link_caps;
2762 break;
2763 case IFM_10G_T:
2764 case IFM_10G_LRM:
2765 case IFM_10G_LR:
2766 case IFM_10G_TWINAX:
2767 #ifndef IFM_ETH_XTYPE
2768 case IFM_10G_SR: /* KR, too */
2769 case IFM_10G_CX4: /* KX4 */
2770 #else
2771 case IFM_10G_KR:
2772 case IFM_10G_KX4:
2773 #endif
2774 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2775 break;
2776 case IFM_5000_T:
2777 speed |= IXGBE_LINK_SPEED_5GB_FULL;
2778 break;
2779 case IFM_2500_T:
2780 case IFM_2500_KX:
2781 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2782 break;
2783 case IFM_1000_T:
2784 case IFM_1000_LX:
2785 case IFM_1000_SX:
2786 case IFM_1000_KX:
2787 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2788 break;
2789 case IFM_100_TX:
2790 speed |= IXGBE_LINK_SPEED_100_FULL;
2791 break;
2792 case IFM_10_T:
2793 speed |= IXGBE_LINK_SPEED_10_FULL;
2794 break;
2795 default:
2796 goto invalid;
2797 }
2798
2799 hw->mac.autotry_restart = TRUE;
2800 hw->mac.ops.setup_link(hw, speed, TRUE);
2801 adapter->advertise = 0;
2802 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
2803 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
2804 adapter->advertise |= 1 << 2;
2805 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
2806 adapter->advertise |= 1 << 1;
2807 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
2808 adapter->advertise |= 1 << 0;
2809 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
2810 adapter->advertise |= 1 << 3;
2811 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
2812 adapter->advertise |= 1 << 4;
2813 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
2814 adapter->advertise |= 1 << 5;
2815 }
2816
2817 return (0);
2818
2819 invalid:
2820 device_printf(adapter->dev, "Invalid media type!\n");
2821
2822 return (EINVAL);
2823 } /* ixgbe_media_change */
2824
2825 /************************************************************************
2826 * ixgbe_set_promisc
2827 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock; FCTRL is read-modify-written. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from unicast-promiscuous off; re-enable below if needed. */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/*
		 * Count attached multicast addresses, capped at the
		 * hardware filter table limit.
		 */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* Only keep multicast-promiscuous when the table can't hold all. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Re-apply promiscuous/allmulti modes requested by the stack. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2867
2868 /************************************************************************
2869 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2870 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the link softint has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* The remaining causes below don't exist on 82598. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow director table reinit request */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal-sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: handled by the mod softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed-fiber event (82599 only): msf softint. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
2992
2993 static void
2994 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
2995 {
2996 struct adapter *adapter = que->adapter;
2997
2998 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2999 itr |= itr << 16;
3000 else
3001 itr |= IXGBE_EITR_CNT_WDIS;
3002
3003 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
3004 itr);
3005 }
3006
3007
3008 /************************************************************************
3009 * ixgbe_sysctl_interrupt_rate_handler
3010 ************************************************************************/
3011 static int
3012 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3013 {
3014 struct sysctlnode node = *rnode;
3015 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3016 struct adapter *adapter = que->adapter;
3017 uint32_t reg, usec, rate;
3018 int error;
3019
3020 if (que == NULL)
3021 return 0;
3022 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3023 usec = ((reg & 0x0FF8) >> 3);
3024 if (usec > 0)
3025 rate = 500000 / usec;
3026 else
3027 rate = 0;
3028 node.sysctl_data = &rate;
3029 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3030 if (error || newp == NULL)
3031 return error;
3032 reg &= ~0xfff; /* default, no limitation */
3033 if (rate > 0 && rate < 500000) {
3034 if (rate < 1000)
3035 rate = 1000;
3036 reg |= ((4000000/rate) & 0xff8);
3037 /*
3038 * When RSC is used, ITR interval must be larger than
3039 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3040 * The minimum value is always greater than 2us on 100M
3041 * (and 10M?(not documented)), but it's not on 1G and higher.
3042 */
3043 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3044 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3045 if ((adapter->num_queues > 1)
3046 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3047 return EINVAL;
3048 }
3049 ixgbe_max_interrupt_rate = rate;
3050 } else
3051 ixgbe_max_interrupt_rate = 0;
3052 ixgbe_eitr_write(que, reg);
3053
3054 return (0);
3055 } /* ixgbe_sysctl_interrupt_rate_handler */
3056
3057 const struct sysctlnode *
3058 ixgbe_sysctl_instance(struct adapter *adapter)
3059 {
3060 const char *dvname;
3061 struct sysctllog **log;
3062 int rc;
3063 const struct sysctlnode *rnode;
3064
3065 if (adapter->sysctltop != NULL)
3066 return adapter->sysctltop;
3067
3068 log = &adapter->sysctllog;
3069 dvname = device_xname(adapter->dev);
3070
3071 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3072 0, CTLTYPE_NODE, dvname,
3073 SYSCTL_DESCR("ixgbe information and settings"),
3074 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3075 goto err;
3076
3077 return rnode;
3078 err:
3079 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3080 return NULL;
3081 }
3082
3083 /************************************************************************
3084 * ixgbe_add_device_sysctls
3085 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only ring/queue geometry */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the tunable from the global default before exposing it. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed from the global default, same as enable_aim above. */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* All external-PHY knobs live under a "phy" subtree. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3214
3215 /************************************************************************
3216 * ixgbe_allocate_pci_resources
3217 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0, which contains the device register space. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register space is mapped non-prefetchable; presumably
		 * to keep MMIO access ordering — the prefetchable hint
		 * from the BAR is deliberately dropped here.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3255
3256 static void
3257 ixgbe_free_softint(struct adapter *adapter)
3258 {
3259 struct ix_queue *que = adapter->queues;
3260 struct tx_ring *txr = adapter->tx_rings;
3261 int i;
3262
3263 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3264 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3265 if (txr->txr_si != NULL)
3266 softint_disestablish(txr->txr_si);
3267 }
3268 if (que->que_si != NULL)
3269 softint_disestablish(que->que_si);
3270 }
3271 if (adapter->txr_wq != NULL)
3272 workqueue_destroy(adapter->txr_wq);
3273 if (adapter->txr_wq_enqueued != NULL)
3274 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3275 if (adapter->que_wq != NULL)
3276 workqueue_destroy(adapter->que_wq);
3277
3278 /* Drain the Link queue */
3279 if (adapter->link_si != NULL) {
3280 softint_disestablish(adapter->link_si);
3281 adapter->link_si = NULL;
3282 }
3283 if (adapter->mod_si != NULL) {
3284 softint_disestablish(adapter->mod_si);
3285 adapter->mod_si = NULL;
3286 }
3287 if (adapter->msf_si != NULL) {
3288 softint_disestablish(adapter->msf_si);
3289 adapter->msf_si = NULL;
3290 }
3291 if (adapter->phy_si != NULL) {
3292 softint_disestablish(adapter->phy_si);
3293 adapter->phy_si = NULL;
3294 }
3295 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3296 if (adapter->fdir_si != NULL) {
3297 softint_disestablish(adapter->fdir_si);
3298 adapter->fdir_si = NULL;
3299 }
3300 }
3301 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3302 if (adapter->mbx_si != NULL) {
3303 softint_disestablish(adapter->mbx_si);
3304 adapter->mbx_si = NULL;
3305 }
3306 }
3307 } /* ixgbe_free_softint */
3308
3309 /************************************************************************
3310 * ixgbe_detach - Device removal routine
3311 *
3312 * Called when the driver is being removed.
3313 * Stops the adapter and deallocates all the resources
3314 * that were allocated for driver operation.
3315 *
3316 * return 0 on success, positive on failure
3317 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV virtual functions exist. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/*
	 * Tear down sysctls and every event counter registered at
	 * attach time, in the same groupings used there.
	 */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue and per-ring counters */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			/* rnbc counters were only attached on 82598 */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	/* mbsdc was only attached on X550 and newer */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally, release ring memory, locks and queue storage. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->im_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3497
3498 /************************************************************************
3499 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3500 *
3501 * Prepare the adapter/port for LPLU and/or WoL
3502 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller holds the core lock (detach/suspend paths). */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wake filters. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, or the LPLU entry error from above. */
	return error;
} /* ixgbe_setup_low_power_mode */
3557
3558 /************************************************************************
3559 * ixgbe_shutdown - Shutdown entry point
3560 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Enter the lowest power state the hardware supports. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* FreeBSD-style errno return (0 on success). */
	return (error);
} /* ixgbe_shutdown */
#endif
3577
3578 /************************************************************************
3579 * ixgbe_suspend
3580 *
3581 * From D0 to D3
3582 ************************************************************************/
3583 static bool
3584 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3585 {
3586 struct adapter *adapter = device_private(dev);
3587 int error = 0;
3588
3589 INIT_DEBUGOUT("ixgbe_suspend: begin");
3590
3591 IXGBE_CORE_LOCK(adapter);
3592
3593 error = ixgbe_setup_low_power_mode(adapter);
3594
3595 IXGBE_CORE_UNLOCK(adapter);
3596
3597 return (error);
3598 } /* ixgbe_suspend */
3599
3600 /************************************************************************
3601 * ixgbe_resume
3602 *
3603 * From D3 to D0
3604 ************************************************************************/
3605 static bool
3606 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3607 {
3608 struct adapter *adapter = device_private(dev);
3609 struct ifnet *ifp = adapter->ifp;
3610 struct ixgbe_hw *hw = &adapter->hw;
3611 u32 wus;
3612
3613 INIT_DEBUGOUT("ixgbe_resume: begin");
3614
3615 IXGBE_CORE_LOCK(adapter);
3616
3617 /* Read & clear WUS register */
3618 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3619 if (wus)
3620 device_printf(dev, "Woken up by (WUS): %#010x\n",
3621 IXGBE_READ_REG(hw, IXGBE_WUS));
3622 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3623 /* And clear WUFC until next low-power transition */
3624 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3625
3626 /*
3627 * Required after D3->D0 transition;
3628 * will re-advertise all previous advertised speeds
3629 */
3630 if (ifp->if_flags & IFF_UP)
3631 ixgbe_init_locked(adapter);
3632
3633 IXGBE_CORE_UNLOCK(adapter);
3634
3635 return true;
3636 } /* ixgbe_resume */
3637
3638 /*
3639 * Set the various hardware offload abilities.
3640 *
3641 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3642 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3643 * mbuf offload flags the driver will understand.
3644 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty.  NetBSD's ifnet has no if_hwassist
	 * field (a FreeBSD concept); offload capability is presumably
	 * conveyed per-packet via mbuf csum flags instead.  Kept as a
	 * placeholder to stay structurally in sync with the FreeBSD
	 * if_ix.c this driver is derived from.
	 */
}
3650
3651 /************************************************************************
3652 * ixgbe_init_locked - Init entry point
3653 *
3654 * Used in two ways: It is used by the stack as an init
3655 * entry point in network interface structure. It is also
3656 * used by the driver as a hw/sw initialization routine to
3657 * get to a consistent state.
3658 *
3659 * return 0 on success, positive on failure
3660 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32		ctrl_ext;
	int		err = 0;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	/* Caller must hold the core lock for the whole sequence. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/*
	 * Clear adapter_stopped first so ixgbe_stop_adapter() runs its
	 * full shutdown sequence even if the adapter was already
	 * marked stopped.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	/*
	 * NOTE(review): 'j' is deliberately NOT reset per ring, so at
	 * most ~10ms total is spent polling for RXDCTL.ENABLE across
	 * all rings combined -- this matches the upstream FreeBSD
	 * driver; confirm before "fixing".
	 */
	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the periodic link/watchdog timer (1s). */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			/* NOTE: returns with IFF_RUNNING still clear. */
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
3891
3892 /************************************************************************
3893 * ixgbe_init
3894 ************************************************************************/
3895 static int
3896 ixgbe_init(struct ifnet *ifp)
3897 {
3898 struct adapter *adapter = ifp->if_softc;
3899
3900 IXGBE_CORE_LOCK(adapter);
3901 ixgbe_init_locked(adapter);
3902 IXGBE_CORE_UNLOCK(adapter);
3903
3904 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
3905 } /* ixgbe_init */
3906
3907 /************************************************************************
3908 * ixgbe_set_ivar
3909 *
3910 * Setup the correct IVAR register for a particular MSI-X interrupt
3911 * (yes this is all very magic and confusing :)
3912 * - entry is the register array entry
3913 * - vector is the MSI-X vector for this queue
3914 * - type is RX/TX/MISC
3915 ************************************************************************/
3916 static void
3917 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3918 {
3919 struct ixgbe_hw *hw = &adapter->hw;
3920 u32 ivar, index;
3921
3922 vector |= IXGBE_IVAR_ALLOC_VAL;
3923
3924 switch (hw->mac.type) {
3925
3926 case ixgbe_mac_82598EB:
3927 if (type == -1)
3928 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3929 else
3930 entry += (type * 64);
3931 index = (entry >> 2) & 0x1F;
3932 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3933 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3934 ivar |= (vector << (8 * (entry & 0x3)));
3935 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3936 break;
3937
3938 case ixgbe_mac_82599EB:
3939 case ixgbe_mac_X540:
3940 case ixgbe_mac_X550:
3941 case ixgbe_mac_X550EM_x:
3942 case ixgbe_mac_X550EM_a:
3943 if (type == -1) { /* MISC IVAR */
3944 index = (entry & 1) * 8;
3945 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3946 ivar &= ~(0xFF << index);
3947 ivar |= (vector << index);
3948 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3949 } else { /* RX/TX IVARS */
3950 index = (16 * (entry & 1)) + (8 * type);
3951 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3952 ivar &= ~(0xFF << index);
3953 ivar |= (vector << index);
3954 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3955 }
3956
3957 default:
3958 break;
3959 }
3960 } /* ixgbe_set_ivar */
3961
3962 /************************************************************************
3963 * ixgbe_configure_ivars
3964 ************************************************************************/
3965 static void
3966 ixgbe_configure_ivars(struct adapter *adapter)
3967 {
3968 struct ix_queue *que = adapter->queues;
3969 u32 newitr;
3970
3971 if (ixgbe_max_interrupt_rate > 0)
3972 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3973 else {
3974 /*
3975 * Disable DMA coalescing if interrupt moderation is
3976 * disabled.
3977 */
3978 adapter->dmac = 0;
3979 newitr = 0;
3980 }
3981
3982 for (int i = 0; i < adapter->num_queues; i++, que++) {
3983 struct rx_ring *rxr = &adapter->rx_rings[i];
3984 struct tx_ring *txr = &adapter->tx_rings[i];
3985 /* First the RX queue entry */
3986 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3987 /* ... and the TX */
3988 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3989 /* Set an Initial EITR value */
3990 ixgbe_eitr_write(que, newitr);
3991 }
3992
3993 /* For the Link interrupt */
3994 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3995 } /* ixgbe_configure_ivars */
3996
3997 /************************************************************************
3998 * ixgbe_config_gpie
3999 ************************************************************************/
4000 static void
4001 ixgbe_config_gpie(struct adapter *adapter)
4002 {
4003 struct ixgbe_hw *hw = &adapter->hw;
4004 u32 gpie;
4005
4006 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4007
4008 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4009 /* Enable Enhanced MSI-X mode */
4010 gpie |= IXGBE_GPIE_MSIX_MODE
4011 | IXGBE_GPIE_EIAME
4012 | IXGBE_GPIE_PBA_SUPPORT
4013 | IXGBE_GPIE_OCD;
4014 }
4015
4016 /* Fan Failure Interrupt */
4017 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4018 gpie |= IXGBE_SDP1_GPIEN;
4019
4020 /* Thermal Sensor Interrupt */
4021 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4022 gpie |= IXGBE_SDP0_GPIEN_X540;
4023
4024 /* Link detection */
4025 switch (hw->mac.type) {
4026 case ixgbe_mac_82599EB:
4027 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4028 break;
4029 case ixgbe_mac_X550EM_x:
4030 case ixgbe_mac_X550EM_a:
4031 gpie |= IXGBE_SDP0_GPIEN_X540;
4032 break;
4033 default:
4034 break;
4035 }
4036
4037 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4038
4039 return;
4040 } /* ixgbe_config_gpie */
4041
4042 /************************************************************************
4043 * ixgbe_config_delay_values
4044 *
4045 * Requires adapter->max_frame_size to be set.
4046 ************************************************************************/
4047 static void
4048 ixgbe_config_delay_values(struct adapter *adapter)
4049 {
4050 struct ixgbe_hw *hw = &adapter->hw;
4051 u32 rxpb, frame, size, tmp;
4052
4053 frame = adapter->max_frame_size;
4054
4055 /* Calculate High Water */
4056 switch (hw->mac.type) {
4057 case ixgbe_mac_X540:
4058 case ixgbe_mac_X550:
4059 case ixgbe_mac_X550EM_x:
4060 case ixgbe_mac_X550EM_a:
4061 tmp = IXGBE_DV_X540(frame, frame);
4062 break;
4063 default:
4064 tmp = IXGBE_DV(frame, frame);
4065 break;
4066 }
4067 size = IXGBE_BT2KB(tmp);
4068 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4069 hw->fc.high_water[0] = rxpb - size;
4070
4071 /* Now calculate Low Water */
4072 switch (hw->mac.type) {
4073 case ixgbe_mac_X540:
4074 case ixgbe_mac_X550:
4075 case ixgbe_mac_X550EM_x:
4076 case ixgbe_mac_X550EM_a:
4077 tmp = IXGBE_LOW_DV_X540(frame);
4078 break;
4079 default:
4080 tmp = IXGBE_LOW_DV(frame);
4081 break;
4082 }
4083 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4084
4085 hw->fc.pause_time = IXGBE_FC_PAUSE;
4086 hw->fc.send_xon = TRUE;
4087 } /* ixgbe_config_delay_values */
4088
4089 /************************************************************************
4090 * ixgbe_set_multi - Multicast Update
4091 *
4092 * Called whenever multicast address list is updated.
4093 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr *mta;
	struct ifnet *ifp = adapter->ifp;
	u8 *update_ptr;
	int mcnt = 0;
	u32 fctrl;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;

	/* Core lock serializes against init/stop and other filter updates. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Rebuild the shadow multicast table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the ethercom multicast list under ETHER_LOCK.  Fall back
	 * to ALLMULTI if either the table would overflow or an address
	 * range (addrlo != addrhi) is present, since ranges cannot be
	 * expressed in the hardware filter.
	 */
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Translate PROMISC/ALLMULTI into the FCTRL filter bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/*
	 * Program exact-match filters only when the list fit; the
	 * iterator callback feeds the shared code one entry at a time.
	 */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

	return;
} /* ixgbe_set_multi */
4148
4149 /************************************************************************
4150 * ixgbe_mc_array_itr
4151 *
4152 * An iterator function needed by the multicast shared code.
4153 * It feeds the shared code routine the addresses in the
4154 * array of ixgbe_set_multi() one by one.
4155 ************************************************************************/
4156 static u8 *
4157 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4158 {
4159 struct ixgbe_mc_addr *mta;
4160
4161 mta = (struct ixgbe_mc_addr *)*update_ptr;
4162 *vmdq = mta->vmdq;
4163
4164 *update_ptr = (u8*)(mta + 1);
4165
4166 return (mta->addr);
4167 } /* ixgbe_mc_array_itr */
4168
4169 /************************************************************************
4170 * ixgbe_local_timer - Timer routine
4171 *
4172 * Checks for link status, updates statistics,
4173 * and runs the watchdog check.
4174 ************************************************************************/
/*
 * Callout entry point: acquire the core lock and run the real timer
 * work in ixgbe_local_timer1().
 */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
4184
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	/* Runs once per second from ixgbe_local_timer(), lock held. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* Full reset: mark interface down and re-run init in place. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
4253
4254 /************************************************************************
4255 * ixgbe_sfp_probe
4256 *
4257 * Determine if a port had optics inserted.
4258 ************************************************************************/
4259 static bool
4260 ixgbe_sfp_probe(struct adapter *adapter)
4261 {
4262 struct ixgbe_hw *hw = &adapter->hw;
4263 device_t dev = adapter->dev;
4264 bool result = FALSE;
4265
4266 if ((hw->phy.type == ixgbe_phy_nl) &&
4267 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4268 s32 ret = hw->phy.ops.identify_sfp(hw);
4269 if (ret)
4270 goto out;
4271 ret = hw->phy.ops.reset(hw);
4272 adapter->sfp_probe = FALSE;
4273 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4274 device_printf(dev,"Unsupported SFP+ module detected!");
4275 device_printf(dev,
4276 "Reload driver with supported module.\n");
4277 goto out;
4278 } else
4279 device_printf(dev, "SFP+ module detected!\n");
4280 /* We now have supported optics */
4281 result = TRUE;
4282 }
4283 out:
4284
4285 return (result);
4286 } /* ixgbe_sfp_probe */
4287
4288 /************************************************************************
4289 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4290 ************************************************************************/
4291 static void
4292 ixgbe_handle_mod(void *context)
4293 {
4294 struct adapter *adapter = context;
4295 struct ixgbe_hw *hw = &adapter->hw;
4296 device_t dev = adapter->dev;
4297 u32 err, cage_full = 0;
4298
4299 if (adapter->hw.need_crosstalk_fix) {
4300 switch (hw->mac.type) {
4301 case ixgbe_mac_82599EB:
4302 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4303 IXGBE_ESDP_SDP2;
4304 break;
4305 case ixgbe_mac_X550EM_x:
4306 case ixgbe_mac_X550EM_a:
4307 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4308 IXGBE_ESDP_SDP0;
4309 break;
4310 default:
4311 break;
4312 }
4313
4314 if (!cage_full)
4315 return;
4316 }
4317
4318 err = hw->phy.ops.identify_sfp(hw);
4319 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4320 device_printf(dev,
4321 "Unsupported SFP+ module type was detected.\n");
4322 return;
4323 }
4324
4325 err = hw->mac.ops.setup_sfp(hw);
4326 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4327 device_printf(dev,
4328 "Setup failure - unsupported SFP+ module type.\n");
4329 return;
4330 }
4331 softint_schedule(adapter->msf_si);
4332 } /* ixgbe_handle_mod */
4333
4334
4335 /************************************************************************
4336 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4337 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If no speeds are explicitly advertised, fall back to
	 * whatever the hardware reports it can do.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/*
	 * Adjust media types shown in ifconfig: the supported set may
	 * have changed with the new module, so rebuild the ifmedia
	 * list from scratch and reselect autoselect.
	 */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4362
4363 /************************************************************************
4364 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4365 ************************************************************************/
4366 static void
4367 ixgbe_handle_phy(void *context)
4368 {
4369 struct adapter *adapter = context;
4370 struct ixgbe_hw *hw = &adapter->hw;
4371 int error;
4372
4373 error = hw->phy.ops.handle_lasi(hw);
4374 if (error == IXGBE_ERR_OVERTEMP)
4375 device_printf(adapter->dev,
4376 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4377 " PHY will downshift to lower power state!\n");
4378 else if (error)
4379 device_printf(adapter->dev,
4380 "Error handling LASI interrupt: %d\n", error);
4381 } /* ixgbe_handle_phy */
4382
4383 static void
4384 ixgbe_ifstop(struct ifnet *ifp, int disable)
4385 {
4386 struct adapter *adapter = ifp->if_softc;
4387
4388 IXGBE_CORE_LOCK(adapter);
4389 ixgbe_stop(adapter);
4390 IXGBE_CORE_UNLOCK(adapter);
4391 }
4392
4393 /************************************************************************
4394 * ixgbe_stop - Stop the hardware
4395 *
4396 * Disables all traffic on the adapter by issuing a
4397 * global reset on the MAC and deallocates TX/RX buffers.
4398 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller (ixgbe_ifstop/init paths) holds the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so ixgbe_stop_adapter() performs its
	 * full shutdown even though the reset above may have marked
	 * the adapter stopped already.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
4434
4435 /************************************************************************
4436 * ixgbe_update_link_status - Update OS on link state
4437 *
4438 * Note: Only updates the OS on the cached link state.
4439 * The real check of the hardware only happens with
4440 * a link interrupt.
4441 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	 * Edge-triggered on the cached link_up flag: only act when the
	 * cached state differs from what we last told the stack
	 * (link_active).
	 */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 * Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			/* VFs need to hear about the new link state too */
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	return;
} /* ixgbe_update_link_status */
4513
4514 /************************************************************************
4515 * ixgbe_config_dmac - Configure DMA Coalescing
4516 ************************************************************************/
4517 static void
4518 ixgbe_config_dmac(struct adapter *adapter)
4519 {
4520 struct ixgbe_hw *hw = &adapter->hw;
4521 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4522
4523 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4524 return;
4525
4526 if (dcfg->watchdog_timer ^ adapter->dmac ||
4527 dcfg->link_speed ^ adapter->link_speed) {
4528 dcfg->watchdog_timer = adapter->dmac;
4529 dcfg->fcoe_en = false;
4530 dcfg->link_speed = adapter->link_speed;
4531 dcfg->num_tcs = 1;
4532
4533 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4534 dcfg->watchdog_timer, dcfg->link_speed);
4535
4536 hw->mac.ops.dmac_config(hw);
4537 }
4538 } /* ixgbe_config_dmac */
4539
4540 /************************************************************************
4541 * ixgbe_enable_intr
4542 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything except the per-queue RX/TX causes. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the per-MAC miscellaneous causes (ECC, SDP pins, TS). */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/*
	 * With MSI-X we use auto clear, except for causes whose
	 * handlers must re-enable them explicitly (link and other).
	 */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
4624
4625 /************************************************************************
4626 * ixgbe_disable_intr
4627 ************************************************************************/
4628 static void
4629 ixgbe_disable_intr(struct adapter *adapter)
4630 {
4631 struct ix_queue *que = adapter->queues;
4632
4633 /* disable interrupts other than queues */
4634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4635
4636 if (adapter->msix_mem)
4637 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4638
4639 for (int i = 0; i < adapter->num_queues; i++, que++)
4640 ixgbe_disable_queue(adapter, que->msix);
4641
4642 IXGBE_WRITE_FLUSH(&adapter->hw);
4643
4644 return;
4645 } /* ixgbe_disable_intr */
4646
4647 /************************************************************************
4648 * ixgbe_legacy_irq - Legacy Interrupt Service routine
4649 ************************************************************************/
4650 static int
4651 ixgbe_legacy_irq(void *arg)
4652 {
4653 struct ix_queue *que = arg;
4654 struct adapter *adapter = que->adapter;
4655 struct ixgbe_hw *hw = &adapter->hw;
4656 struct ifnet *ifp = adapter->ifp;
4657 struct tx_ring *txr = adapter->tx_rings;
4658 bool more = false;
4659 u32 eicr, eicr_mask;
4660
4661 /* Silicon errata #26 on 82598 */
4662 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
4663
4664 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4665
4666 adapter->stats.pf.legint.ev_count++;
4667 ++que->irqs.ev_count;
4668 if (eicr == 0) {
4669 adapter->stats.pf.intzero.ev_count++;
4670 if ((ifp->if_flags & IFF_UP) != 0)
4671 ixgbe_enable_intr(adapter);
4672 return 0;
4673 }
4674
4675 if ((ifp->if_flags & IFF_RUNNING) != 0) {
4676 #ifdef __NetBSD__
4677 /* Don't run ixgbe_rxeof in interrupt context */
4678 more = true;
4679 #else
4680 more = ixgbe_rxeof(que);
4681 #endif
4682
4683 IXGBE_TX_LOCK(txr);
4684 ixgbe_txeof(txr);
4685 #ifdef notyet
4686 if (!ixgbe_ring_empty(ifp, txr->br))
4687 ixgbe_start_locked(ifp, txr);
4688 #endif
4689 IXGBE_TX_UNLOCK(txr);
4690 }
4691
4692 /* Check for fan failure */
4693 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
4694 ixgbe_check_fan_failure(adapter, eicr, true);
4695 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4696 }
4697
4698 /* Link status change */
4699 if (eicr & IXGBE_EICR_LSC)
4700 softint_schedule(adapter->link_si);
4701
4702 if (ixgbe_is_sfp(hw)) {
4703 /* Pluggable optics-related interrupt */
4704 if (hw->mac.type >= ixgbe_mac_X540)
4705 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
4706 else
4707 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
4708
4709 if (eicr & eicr_mask) {
4710 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
4711 softint_schedule(adapter->mod_si);
4712 }
4713
4714 if ((hw->mac.type == ixgbe_mac_82599EB) &&
4715 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
4716 IXGBE_WRITE_REG(hw, IXGBE_EICR,
4717 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
4718 softint_schedule(adapter->msf_si);
4719 }
4720 }
4721
4722 /* External PHY interrupt */
4723 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
4724 (eicr & IXGBE_EICR_GPI_SDP0_X540))
4725 softint_schedule(adapter->phy_si);
4726
4727 if (more)
4728 softint_schedule(que->que_si);
4729 else
4730 ixgbe_enable_intr(adapter);
4731
4732 return 1;
4733 } /* ixgbe_legacy_irq */
4734
4735 /************************************************************************
4736 * ixgbe_free_pciintr_resources
4737 ************************************************************************/
4738 static void
4739 ixgbe_free_pciintr_resources(struct adapter *adapter)
4740 {
4741 struct ix_queue *que = adapter->queues;
4742 int rid;
4743
4744 /*
4745 * Release all msix queue resources:
4746 */
4747 for (int i = 0; i < adapter->num_queues; i++, que++) {
4748 if (que->res != NULL) {
4749 pci_intr_disestablish(adapter->osdep.pc,
4750 adapter->osdep.ihs[i]);
4751 adapter->osdep.ihs[i] = NULL;
4752 }
4753 }
4754
4755 /* Clean the Legacy or Link interrupt last */
4756 if (adapter->vector) /* we are doing MSIX */
4757 rid = adapter->vector;
4758 else
4759 rid = 0;
4760
4761 if (adapter->osdep.ihs[rid] != NULL) {
4762 pci_intr_disestablish(adapter->osdep.pc,
4763 adapter->osdep.ihs[rid]);
4764 adapter->osdep.ihs[rid] = NULL;
4765 }
4766
4767 if (adapter->osdep.intrs != NULL) {
4768 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
4769 adapter->osdep.nintrs);
4770 adapter->osdep.intrs = NULL;
4771 }
4772
4773 return;
4774 } /* ixgbe_free_pciintr_resources */
4775
4776 /************************************************************************
4777 * ixgbe_free_pci_resources
4778 ************************************************************************/
4779 static void
4780 ixgbe_free_pci_resources(struct adapter *adapter)
4781 {
4782
4783 ixgbe_free_pciintr_resources(adapter);
4784
4785 if (adapter->osdep.mem_size != 0) {
4786 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
4787 adapter->osdep.mem_bus_space_handle,
4788 adapter->osdep.mem_size);
4789 }
4790
4791 return;
4792 } /* ixgbe_free_pci_resources */
4793
4794 /************************************************************************
4795 * ixgbe_set_sysctl_value
4796 ************************************************************************/
4797 static void
4798 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4799 const char *description, int *limit, int value)
4800 {
4801 device_t dev = adapter->dev;
4802 struct sysctllog **log;
4803 const struct sysctlnode *rnode, *cnode;
4804
4805 log = &adapter->sysctllog;
4806 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
4807 aprint_error_dev(dev, "could not create sysctl root\n");
4808 return;
4809 }
4810 if (sysctl_createv(log, 0, &rnode, &cnode,
4811 CTLFLAG_READWRITE, CTLTYPE_INT,
4812 name, SYSCTL_DESCR(description),
4813 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
4814 aprint_error_dev(dev, "could not create sysctl\n");
4815 *limit = value;
4816 } /* ixgbe_set_sysctl_value */
4817
4818 /************************************************************************
4819 * ixgbe_sysctl_flowcntl
4820 *
4821 * SYSCTL wrapper around setting Flow Control
4822 ************************************************************************/
4823 static int
4824 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
4825 {
4826 struct sysctlnode node = *rnode;
4827 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4828 int error, fc;
4829
4830 fc = adapter->hw.fc.current_mode;
4831 node.sysctl_data = &fc;
4832 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4833 if (error != 0 || newp == NULL)
4834 return error;
4835
4836 /* Don't bother if it's not changed */
4837 if (fc == adapter->hw.fc.current_mode)
4838 return (0);
4839
4840 return ixgbe_set_flowcntl(adapter, fc);
4841 } /* ixgbe_sysctl_flowcntl */
4842
4843 /************************************************************************
4844 * ixgbe_set_flowcntl - Set flow control
4845 *
4846 * Flow control values:
4847 * 0 - off
4848 * 1 - rx pause
4849 * 2 - tx pause
4850 * 3 - full
4851 ************************************************************************/
4852 static int
4853 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4854 {
4855 switch (fc) {
4856 case ixgbe_fc_rx_pause:
4857 case ixgbe_fc_tx_pause:
4858 case ixgbe_fc_full:
4859 adapter->hw.fc.requested_mode = fc;
4860 if (adapter->num_queues > 1)
4861 ixgbe_disable_rx_drop(adapter);
4862 break;
4863 case ixgbe_fc_none:
4864 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4865 if (adapter->num_queues > 1)
4866 ixgbe_enable_rx_drop(adapter);
4867 break;
4868 default:
4869 return (EINVAL);
4870 }
4871
4872 #if 0 /* XXX NetBSD */
4873 /* Don't autoneg if forcing a value */
4874 adapter->hw.fc.disable_fc_autoneg = TRUE;
4875 #endif
4876 ixgbe_fc_enable(&adapter->hw);
4877
4878 return (0);
4879 } /* ixgbe_set_flowcntl */
4880
4881 /************************************************************************
4882 * ixgbe_enable_rx_drop
4883 *
4884 * Enable the hardware to drop packets when the buffer is
4885 * full. This is useful with multiqueue, so that no single
4886 * queue being full stalls the entire RX engine. We only
4887 * enable this when Multiqueue is enabled AND Flow Control
4888 * is disabled.
4889 ************************************************************************/
4890 static void
4891 ixgbe_enable_rx_drop(struct adapter *adapter)
4892 {
4893 struct ixgbe_hw *hw = &adapter->hw;
4894 struct rx_ring *rxr;
4895 u32 srrctl;
4896
4897 for (int i = 0; i < adapter->num_queues; i++) {
4898 rxr = &adapter->rx_rings[i];
4899 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4900 srrctl |= IXGBE_SRRCTL_DROP_EN;
4901 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4902 }
4903
4904 /* enable drop for each vf */
4905 for (int i = 0; i < adapter->num_vfs; i++) {
4906 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4907 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4908 IXGBE_QDE_ENABLE));
4909 }
4910 } /* ixgbe_enable_rx_drop */
4911
4912 /************************************************************************
4913 * ixgbe_disable_rx_drop
4914 ************************************************************************/
4915 static void
4916 ixgbe_disable_rx_drop(struct adapter *adapter)
4917 {
4918 struct ixgbe_hw *hw = &adapter->hw;
4919 struct rx_ring *rxr;
4920 u32 srrctl;
4921
4922 for (int i = 0; i < adapter->num_queues; i++) {
4923 rxr = &adapter->rx_rings[i];
4924 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4925 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4926 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4927 }
4928
4929 /* disable drop for each vf */
4930 for (int i = 0; i < adapter->num_vfs; i++) {
4931 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4932 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4933 }
4934 } /* ixgbe_disable_rx_drop */
4935
4936 /************************************************************************
4937 * ixgbe_sysctl_advertise
4938 *
4939 * SYSCTL wrapper around setting advertised speed
4940 ************************************************************************/
4941 static int
4942 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
4943 {
4944 struct sysctlnode node = *rnode;
4945 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4946 int error = 0, advertise;
4947
4948 advertise = adapter->advertise;
4949 node.sysctl_data = &advertise;
4950 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4951 if (error != 0 || newp == NULL)
4952 return error;
4953
4954 return ixgbe_set_advertise(adapter, advertise);
4955 } /* ixgbe_sysctl_advertise */
4956
4957 /************************************************************************
4958 * ixgbe_set_advertise - Control advertised link speed
4959 *
4960 * Flags:
4961 * 0x00 - Default (all capable link speed)
4962 * 0x01 - advertise 100 Mb
4963 * 0x02 - advertise 1G
4964 * 0x04 - advertise 10G
4965 * 0x08 - advertise 10 Mb
4966 * 0x10 - advertise 2.5G
4967 * 0x20 - advertise 5G
4968 ************************************************************************/
4969 static int
4970 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4971 {
4972 device_t dev;
4973 struct ixgbe_hw *hw;
4974 ixgbe_link_speed speed = 0;
4975 ixgbe_link_speed link_caps = 0;
4976 s32 err = IXGBE_NOT_IMPLEMENTED;
4977 bool negotiate = FALSE;
4978
4979 /* Checks to validate new value */
4980 if (adapter->advertise == advertise) /* no change */
4981 return (0);
4982
4983 dev = adapter->dev;
4984 hw = &adapter->hw;
4985
4986 /* No speed changes for backplane media */
4987 if (hw->phy.media_type == ixgbe_media_type_backplane)
4988 return (ENODEV);
4989
4990 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4991 (hw->phy.multispeed_fiber))) {
4992 device_printf(dev,
4993 "Advertised speed can only be set on copper or "
4994 "multispeed fiber media types.\n");
4995 return (EINVAL);
4996 }
4997
4998 if (advertise < 0x0 || advertise > 0x2f) {
4999 device_printf(dev,
5000 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5001 return (EINVAL);
5002 }
5003
5004 if (hw->mac.ops.get_link_capabilities) {
5005 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5006 &negotiate);
5007 if (err != IXGBE_SUCCESS) {
5008 device_printf(dev, "Unable to determine supported advertise speeds\n");
5009 return (ENODEV);
5010 }
5011 }
5012
5013 /* Set new value and report new advertised mode */
5014 if (advertise & 0x1) {
5015 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5016 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5017 return (EINVAL);
5018 }
5019 speed |= IXGBE_LINK_SPEED_100_FULL;
5020 }
5021 if (advertise & 0x2) {
5022 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5023 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5024 return (EINVAL);
5025 }
5026 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5027 }
5028 if (advertise & 0x4) {
5029 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5030 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5031 return (EINVAL);
5032 }
5033 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5034 }
5035 if (advertise & 0x8) {
5036 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5037 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5038 return (EINVAL);
5039 }
5040 speed |= IXGBE_LINK_SPEED_10_FULL;
5041 }
5042 if (advertise & 0x10) {
5043 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5044 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5045 return (EINVAL);
5046 }
5047 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5048 }
5049 if (advertise & 0x20) {
5050 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5051 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5052 return (EINVAL);
5053 }
5054 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5055 }
5056 if (advertise == 0)
5057 speed = link_caps; /* All capable link speed */
5058
5059 hw->mac.autotry_restart = TRUE;
5060 hw->mac.ops.setup_link(hw, speed, TRUE);
5061 adapter->advertise = advertise;
5062
5063 return (0);
5064 } /* ixgbe_set_advertise */
5065
5066 /************************************************************************
5067 * ixgbe_get_advertise - Get current advertised speed settings
5068 *
5069 * Formatted for sysctl usage.
5070 * Flags:
5071 * 0x01 - advertise 100 Mb
5072 * 0x02 - advertise 1G
5073 * 0x04 - advertise 10G
5074 * 0x08 - advertise 10 Mb (yes, Mb)
5075 * 0x10 - advertise 2.5G
5076 * 0x20 - advertise 5G
5077 ************************************************************************/
5078 static int
5079 ixgbe_get_advertise(struct adapter *adapter)
5080 {
5081 struct ixgbe_hw *hw = &adapter->hw;
5082 int speed;
5083 ixgbe_link_speed link_caps = 0;
5084 s32 err;
5085 bool negotiate = FALSE;
5086
5087 /*
5088 * Advertised speed means nothing unless it's copper or
5089 * multi-speed fiber
5090 */
5091 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5092 !(hw->phy.multispeed_fiber))
5093 return (0);
5094
5095 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5096 if (err != IXGBE_SUCCESS)
5097 return (0);
5098
5099 speed =
5100 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5101 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5102 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5103 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5104 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5105 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5106
5107 return speed;
5108 } /* ixgbe_get_advertise */
5109
5110 /************************************************************************
5111 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5112 *
5113 * Control values:
5114 * 0/1 - off / on (use default value of 1000)
5115 *
5116 * Legal timer values are:
5117 * 50,100,250,500,1000,2000,5000,10000
5118 *
5119 * Turning off interrupt moderation will also turn this off.
5120 ************************************************************************/
5121 static int
5122 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5123 {
5124 struct sysctlnode node = *rnode;
5125 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5126 struct ifnet *ifp = adapter->ifp;
5127 int error;
5128 int newval;
5129
5130 newval = adapter->dmac;
5131 node.sysctl_data = &newval;
5132 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5133 if ((error) || (newp == NULL))
5134 return (error);
5135
5136 switch (newval) {
5137 case 0:
5138 /* Disabled */
5139 adapter->dmac = 0;
5140 break;
5141 case 1:
5142 /* Enable and use default */
5143 adapter->dmac = 1000;
5144 break;
5145 case 50:
5146 case 100:
5147 case 250:
5148 case 500:
5149 case 1000:
5150 case 2000:
5151 case 5000:
5152 case 10000:
5153 /* Legal values - allow */
5154 adapter->dmac = newval;
5155 break;
5156 default:
5157 /* Do nothing, illegal value */
5158 return (EINVAL);
5159 }
5160
5161 /* Re-initialize hardware if it's already running */
5162 if (ifp->if_flags & IFF_RUNNING)
5163 ixgbe_init(ifp);
5164
5165 return (0);
5166 }
5167
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 * Sysctl to test power states
 * Values:
 *   0 - set device to D0
 *   3 - set device to D3
 *   (none) - get current device power state
 *
 * NOTE: the body is FreeBSD code under "#ifdef notyet" and is
 * compiled out on NetBSD; the handler currently always returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

	/* FreeBSD-only: pci_get_powerstate/DEVICE_SUSPEND have no
	 * direct NetBSD equivalents here, hence "notyet". */
	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
#endif
5211
5212 /************************************************************************
5213 * ixgbe_sysctl_wol_enable
5214 *
5215 * Sysctl to enable/disable the WoL capability,
5216 * if supported by the adapter.
5217 *
5218 * Values:
5219 * 0 - disabled
5220 * 1 - enabled
5221 ************************************************************************/
5222 static int
5223 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5224 {
5225 struct sysctlnode node = *rnode;
5226 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5227 struct ixgbe_hw *hw = &adapter->hw;
5228 bool new_wol_enabled;
5229 int error = 0;
5230
5231 new_wol_enabled = hw->wol_enabled;
5232 node.sysctl_data = &new_wol_enabled;
5233 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5234 if ((error) || (newp == NULL))
5235 return (error);
5236 if (new_wol_enabled == hw->wol_enabled)
5237 return (0);
5238
5239 if (new_wol_enabled && !adapter->wol_support)
5240 return (ENODEV);
5241 else
5242 hw->wol_enabled = new_wol_enabled;
5243
5244 return (0);
5245 } /* ixgbe_sysctl_wol_enable */
5246
5247 /************************************************************************
5248 * ixgbe_sysctl_wufc - Wake Up Filter Control
5249 *
5250 * Sysctl to enable/disable the types of packets that the
5251 * adapter will wake up on upon receipt.
5252 * Flags:
5253 * 0x1 - Link Status Change
5254 * 0x2 - Magic Packet
5255 * 0x4 - Direct Exact
5256 * 0x8 - Directed Multicast
5257 * 0x10 - Broadcast
5258 * 0x20 - ARP/IPv4 Request Packet
5259 * 0x40 - Direct IPv4 Packet
5260 * 0x80 - Direct IPv6 Packet
5261 *
5262 * Settings not listed above will cause the sysctl to return an error.
5263 ************************************************************************/
5264 static int
5265 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5266 {
5267 struct sysctlnode node = *rnode;
5268 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5269 int error = 0;
5270 u32 new_wufc;
5271
5272 new_wufc = adapter->wufc;
5273 node.sysctl_data = &new_wufc;
5274 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5275 if ((error) || (newp == NULL))
5276 return (error);
5277 if (new_wufc == adapter->wufc)
5278 return (0);
5279
5280 if (new_wufc & 0xffffff00)
5281 return (EINVAL);
5282
5283 new_wufc &= 0xff;
5284 new_wufc |= (0xffffff & adapter->wufc);
5285 adapter->wufc = new_wufc;
5286
5287 return (0);
5288 } /* ixgbe_sysctl_wufc */
5289
#ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 * Debug sysctl that dumps the RSS redirection table (RETA/ERETA).
 *
 * NOTE: the body is FreeBSD sbuf-based code under "#ifdef notyet"
 * and is compiled out on NetBSD; the handler currently always
 * returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the extended 128-entry table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries 32..127 live in the ERETA registers */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
#endif /* IXGBE_DEBUG */
5348
5349 /************************************************************************
5350 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5351 *
5352 * For X552/X557-AT devices using an external PHY
5353 ************************************************************************/
5354 static int
5355 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5356 {
5357 struct sysctlnode node = *rnode;
5358 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5359 struct ixgbe_hw *hw = &adapter->hw;
5360 int val;
5361 u16 reg;
5362 int error;
5363
5364 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5365 device_printf(adapter->dev,
5366 "Device has no supported external thermal sensor.\n");
5367 return (ENODEV);
5368 }
5369
5370 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5371 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5372 device_printf(adapter->dev,
5373 "Error reading from PHY's current temperature register\n");
5374 return (EAGAIN);
5375 }
5376
5377 node.sysctl_data = &val;
5378
5379 /* Shift temp for output */
5380 val = reg >> 8;
5381
5382 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5383 if ((error) || (newp == NULL))
5384 return (error);
5385
5386 return (0);
5387 } /* ixgbe_sysctl_phy_temp */
5388
5389 /************************************************************************
5390 * ixgbe_sysctl_phy_overtemp_occurred
5391 *
5392 * Reports (directly from the PHY) whether the current PHY
5393 * temperature is over the overtemp threshold.
5394 ************************************************************************/
5395 static int
5396 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5397 {
5398 struct sysctlnode node = *rnode;
5399 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5400 struct ixgbe_hw *hw = &adapter->hw;
5401 int val, error;
5402 u16 reg;
5403
5404 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5405 device_printf(adapter->dev,
5406 "Device has no supported external thermal sensor.\n");
5407 return (ENODEV);
5408 }
5409
5410 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5411 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5412 device_printf(adapter->dev,
5413 "Error reading from PHY's temperature status register\n");
5414 return (EAGAIN);
5415 }
5416
5417 node.sysctl_data = &val;
5418
5419 /* Get occurrence bit */
5420 val = !!(reg & 0x4000);
5421
5422 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5423 if ((error) || (newp == NULL))
5424 return (error);
5425
5426 return (0);
5427 } /* ixgbe_sysctl_phy_overtemp_occurred */
5428
5429 /************************************************************************
5430 * ixgbe_sysctl_eee_state
5431 *
5432 * Sysctl to set EEE power saving feature
5433 * Values:
5434 * 0 - disable EEE
5435 * 1 - enable EEE
5436 * (none) - get current device EEE state
5437 ************************************************************************/
5438 static int
5439 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5440 {
5441 struct sysctlnode node = *rnode;
5442 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5443 struct ifnet *ifp = adapter->ifp;
5444 device_t dev = adapter->dev;
5445 int curr_eee, new_eee, error = 0;
5446 s32 retval;
5447
5448 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5449 node.sysctl_data = &new_eee;
5450 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5451 if ((error) || (newp == NULL))
5452 return (error);
5453
5454 /* Nothing to do */
5455 if (new_eee == curr_eee)
5456 return (0);
5457
5458 /* Not supported */
5459 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5460 return (EINVAL);
5461
5462 /* Bounds checking */
5463 if ((new_eee < 0) || (new_eee > 1))
5464 return (EINVAL);
5465
5466 retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5467 if (retval) {
5468 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5469 return (EINVAL);
5470 }
5471
5472 /* Restart auto-neg */
5473 ixgbe_init(ifp);
5474
5475 device_printf(dev, "New EEE state: %d\n", new_eee);
5476
5477 /* Cache new value */
5478 if (new_eee)
5479 adapter->feat_en |= IXGBE_FEATURE_EEE;
5480 else
5481 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5482
5483 return (error);
5484 } /* ixgbe_sysctl_eee_state */
5485
5486 /************************************************************************
5487 * ixgbe_init_device_features
5488 ************************************************************************/
5489 static void
5490 ixgbe_init_device_features(struct adapter *adapter)
5491 {
5492 adapter->feat_cap = IXGBE_FEATURE_NETMAP
5493 | IXGBE_FEATURE_RSS
5494 | IXGBE_FEATURE_MSI
5495 | IXGBE_FEATURE_MSIX
5496 | IXGBE_FEATURE_LEGACY_IRQ
5497 | IXGBE_FEATURE_LEGACY_TX;
5498
5499 /* Set capabilities first... */
5500 switch (adapter->hw.mac.type) {
5501 case ixgbe_mac_82598EB:
5502 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
5503 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5504 break;
5505 case ixgbe_mac_X540:
5506 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5507 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5508 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5509 (adapter->hw.bus.func == 0))
5510 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5511 break;
5512 case ixgbe_mac_X550:
5513 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5514 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5515 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5516 break;
5517 case ixgbe_mac_X550EM_x:
5518 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5519 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5520 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5521 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5522 break;
5523 case ixgbe_mac_X550EM_a:
5524 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5525 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5526 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5527 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5528 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5529 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5530 adapter->feat_cap |= IXGBE_FEATURE_EEE;
5531 }
5532 break;
5533 case ixgbe_mac_82599EB:
5534 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
5535 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
5536 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5537 (adapter->hw.bus.func == 0))
5538 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
5539 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5540 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5541 break;
5542 default:
5543 break;
5544 }
5545
5546 /* Enabled by default... */
5547 /* Fan failure detection */
5548 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5549 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5550 /* Netmap */
5551 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
5552 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
5553 /* EEE */
5554 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
5555 adapter->feat_en |= IXGBE_FEATURE_EEE;
5556 /* Thermal Sensor */
5557 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5558 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5559
5560 /* Enabled via global sysctl... */
5561 /* Flow Director */
5562 if (ixgbe_enable_fdir) {
5563 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
5564 adapter->feat_en |= IXGBE_FEATURE_FDIR;
5565 else
5566 device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
5567 }
5568 /* Legacy (single queue) transmit */
5569 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
5570 ixgbe_enable_legacy_tx)
5571 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
5572 /*
5573 * Message Signal Interrupts - Extended (MSI-X)
5574 * Normal MSI is only enabled if MSI-X calls fail.
5575 */
5576 if (!ixgbe_enable_msix)
5577 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
5578 /* Receive-Side Scaling (RSS) */
5579 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5580 adapter->feat_en |= IXGBE_FEATURE_RSS;
5581
5582 /* Disable features with unmet dependencies... */
5583 /* No MSI-X */
5584 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
5585 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5586 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5587 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
5588 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
5589 }
5590 } /* ixgbe_init_device_features */
5591
5592 /************************************************************************
5593 * ixgbe_probe - Device identification routine
5594 *
5595 * Determines if the driver should be loaded on
5596 * adapter based on its PCI vendor/device ID.
5597 *
5598 * return BUS_PROBE_DEFAULT on success, positive on failure
5599 ************************************************************************/
5600 static int
5601 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
5602 {
5603 const struct pci_attach_args *pa = aux;
5604
5605 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
5606 }
5607
5608 static ixgbe_vendor_info_t *
5609 ixgbe_lookup(const struct pci_attach_args *pa)
5610 {
5611 ixgbe_vendor_info_t *ent;
5612 pcireg_t subid;
5613
5614 INIT_DEBUGOUT("ixgbe_lookup: begin");
5615
5616 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
5617 return NULL;
5618
5619 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
5620
5621 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
5622 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
5623 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
5624 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
5625 (ent->subvendor_id == 0)) &&
5626 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
5627 (ent->subdevice_id == 0))) {
5628 ++ixgbe_total_ports;
5629 return ent;
5630 }
5631 }
5632 return NULL;
5633 }
5634
5635 static int
5636 ixgbe_ifflags_cb(struct ethercom *ec)
5637 {
5638 struct ifnet *ifp = &ec->ec_if;
5639 struct adapter *adapter = ifp->if_softc;
5640 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
5641
5642 IXGBE_CORE_LOCK(adapter);
5643
5644 if (change != 0)
5645 adapter->if_flags = ifp->if_flags;
5646
5647 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
5648 rc = ENETRESET;
5649 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
5650 ixgbe_set_promisc(adapter);
5651
5652 /* Set up VLAN support and filter */
5653 ixgbe_setup_vlan_hw_support(adapter);
5654
5655 IXGBE_CORE_UNLOCK(adapter);
5656
5657 return rc;
5658 }
5659
5660 /************************************************************************
5661 * ixgbe_ioctl - Ioctl entry point
5662 *
5663 * Called when the user wants to configure the interface.
5664 *
5665 * return 0 on success, positive on failure
5666 ************************************************************************/
5667 static int
5668 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
5669 {
5670 struct adapter *adapter = ifp->if_softc;
5671 struct ixgbe_hw *hw = &adapter->hw;
5672 struct ifcapreq *ifcr = data;
5673 struct ifreq *ifr = data;
5674 int error = 0;
5675 int l4csum_en;
5676 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
5677 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
5678
5679 switch (command) {
5680 case SIOCSIFFLAGS:
5681 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5682 break;
5683 case SIOCADDMULTI:
5684 case SIOCDELMULTI:
5685 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
5686 break;
5687 case SIOCSIFMEDIA:
5688 case SIOCGIFMEDIA:
5689 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5690 break;
5691 case SIOCSIFCAP:
5692 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5693 break;
5694 case SIOCSIFMTU:
5695 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5696 break;
5697 #ifdef __NetBSD__
5698 case SIOCINITIFADDR:
5699 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
5700 break;
5701 case SIOCGIFFLAGS:
5702 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
5703 break;
5704 case SIOCGIFAFLAG_IN:
5705 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
5706 break;
5707 case SIOCGIFADDR:
5708 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
5709 break;
5710 case SIOCGIFMTU:
5711 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
5712 break;
5713 case SIOCGIFCAP:
5714 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
5715 break;
5716 case SIOCGETHERCAP:
5717 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
5718 break;
5719 case SIOCGLIFADDR:
5720 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
5721 break;
5722 case SIOCZIFDATA:
5723 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
5724 hw->mac.ops.clear_hw_cntrs(hw);
5725 ixgbe_clear_evcnt(adapter);
5726 break;
5727 case SIOCAIFADDR:
5728 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
5729 break;
5730 #endif
5731 default:
5732 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
5733 break;
5734 }
5735
5736 switch (command) {
5737 case SIOCSIFMEDIA:
5738 case SIOCGIFMEDIA:
5739 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
5740 case SIOCGI2C:
5741 {
5742 struct ixgbe_i2c_req i2c;
5743
5744 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5745 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5746 if (error != 0)
5747 break;
5748 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5749 error = EINVAL;
5750 break;
5751 }
5752 if (i2c.len > sizeof(i2c.data)) {
5753 error = EINVAL;
5754 break;
5755 }
5756
5757 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
5758 i2c.dev_addr, i2c.data);
5759 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5760 break;
5761 }
5762 case SIOCSIFCAP:
5763 /* Layer-4 Rx checksum offload has to be turned on and
5764 * off as a unit.
5765 */
5766 l4csum_en = ifcr->ifcr_capenable & l4csum;
5767 if (l4csum_en != l4csum && l4csum_en != 0)
5768 return EINVAL;
5769 /*FALLTHROUGH*/
5770 case SIOCADDMULTI:
5771 case SIOCDELMULTI:
5772 case SIOCSIFFLAGS:
5773 case SIOCSIFMTU:
5774 default:
5775 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
5776 return error;
5777 if ((ifp->if_flags & IFF_RUNNING) == 0)
5778 ;
5779 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
5780 IXGBE_CORE_LOCK(adapter);
5781 ixgbe_init_locked(adapter);
5782 ixgbe_recalculate_max_frame(adapter);
5783 IXGBE_CORE_UNLOCK(adapter);
5784 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
5785 /*
5786 * Multicast list has changed; set the hardware filter
5787 * accordingly.
5788 */
5789 IXGBE_CORE_LOCK(adapter);
5790 ixgbe_disable_intr(adapter);
5791 ixgbe_set_multi(adapter);
5792 ixgbe_enable_intr(adapter);
5793 IXGBE_CORE_UNLOCK(adapter);
5794 }
5795 return 0;
5796 }
5797
5798 return error;
5799 } /* ixgbe_ioctl */
5800
5801 /************************************************************************
5802 * ixgbe_check_fan_failure
5803 ************************************************************************/
5804 static void
5805 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
5806 {
5807 u32 mask;
5808
5809 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
5810 IXGBE_ESDP_SDP1;
5811
5812 if (reg & mask)
5813 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5814 } /* ixgbe_check_fan_failure */
5815
5816 /************************************************************************
5817 * ixgbe_handle_que
5818 ************************************************************************/
5819 static void
5820 ixgbe_handle_que(void *context)
5821 {
5822 struct ix_queue *que = context;
5823 struct adapter *adapter = que->adapter;
5824 struct tx_ring *txr = que->txr;
5825 struct ifnet *ifp = adapter->ifp;
5826 bool more = false;
5827
5828 adapter->handleq.ev_count++;
5829
5830 if (ifp->if_flags & IFF_RUNNING) {
5831 more = ixgbe_rxeof(que);
5832 IXGBE_TX_LOCK(txr);
5833 more |= ixgbe_txeof(txr);
5834 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5835 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
5836 ixgbe_mq_start_locked(ifp, txr);
5837 /* Only for queue 0 */
5838 /* NetBSD still needs this for CBQ */
5839 if ((&adapter->queues[0] == que)
5840 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
5841 ixgbe_legacy_start_locked(ifp, txr);
5842 IXGBE_TX_UNLOCK(txr);
5843 }
5844
5845 if (more) {
5846 if (adapter->txrx_use_workqueue) {
5847 /*
5848 * "enqueued flag" is not required here.
5849 * See ixgbe_msix_que().
5850 */
5851 workqueue_enqueue(adapter->que_wq, &que->wq_cookie,
5852 curcpu());
5853 } else {
5854 softint_schedule(que->que_si);
5855 }
5856 } else if (que->res != NULL) {
5857 /* Re-enable this interrupt */
5858 ixgbe_enable_queue(adapter, que->msix);
5859 } else
5860 ixgbe_enable_intr(adapter);
5861
5862 return;
5863 } /* ixgbe_handle_que */
5864
/************************************************************************
 * ixgbe_handle_que_work
 *
 * Workqueue entry point for deferred queue processing: recovers the
 * ix_queue from the embedded work cookie and runs the common handler.
 ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
5879
5880 /************************************************************************
5881 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
5882 ************************************************************************/
5883 static int
5884 ixgbe_allocate_legacy(struct adapter *adapter,
5885 const struct pci_attach_args *pa)
5886 {
5887 device_t dev = adapter->dev;
5888 struct ix_queue *que = adapter->queues;
5889 struct tx_ring *txr = adapter->tx_rings;
5890 int counts[PCI_INTR_TYPE_SIZE];
5891 pci_intr_type_t intr_type, max_type;
5892 char intrbuf[PCI_INTRSTR_LEN];
5893 const char *intrstr = NULL;
5894
5895 /* We allocate a single interrupt resource */
5896 max_type = PCI_INTR_TYPE_MSI;
5897 counts[PCI_INTR_TYPE_MSIX] = 0;
5898 counts[PCI_INTR_TYPE_MSI] =
5899 (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
5900 /* Check not feat_en but feat_cap to fallback to INTx */
5901 counts[PCI_INTR_TYPE_INTX] =
5902 (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
5903
5904 alloc_retry:
5905 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
5906 aprint_error_dev(dev, "couldn't alloc interrupt\n");
5907 return ENXIO;
5908 }
5909 adapter->osdep.nintrs = 1;
5910 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
5911 intrbuf, sizeof(intrbuf));
5912 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
5913 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
5914 device_xname(dev));
5915 intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
5916 if (adapter->osdep.ihs[0] == NULL) {
5917 aprint_error_dev(dev,"unable to establish %s\n",
5918 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5919 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
5920 adapter->osdep.intrs = NULL;
5921 switch (intr_type) {
5922 case PCI_INTR_TYPE_MSI:
5923 /* The next try is for INTx: Disable MSI */
5924 max_type = PCI_INTR_TYPE_INTX;
5925 counts[PCI_INTR_TYPE_INTX] = 1;
5926 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
5927 if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
5928 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5929 goto alloc_retry;
5930 } else
5931 break;
5932 case PCI_INTR_TYPE_INTX:
5933 default:
5934 /* See below */
5935 break;
5936 }
5937 }
5938 if (intr_type == PCI_INTR_TYPE_INTX) {
5939 adapter->feat_en &= ~IXGBE_FEATURE_MSI;
5940 adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5941 }
5942 if (adapter->osdep.ihs[0] == NULL) {
5943 aprint_error_dev(dev,
5944 "couldn't establish interrupt%s%s\n",
5945 intrstr ? " at " : "", intrstr ? intrstr : "");
5946 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
5947 adapter->osdep.intrs = NULL;
5948 return ENXIO;
5949 }
5950 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
5951 /*
5952 * Try allocating a fast interrupt and the associated deferred
5953 * processing contexts.
5954 */
5955 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5956 txr->txr_si =
5957 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
5958 ixgbe_deferred_mq_start, txr);
5959 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
5960 ixgbe_handle_que, que);
5961
5962 if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
5963 & (txr->txr_si == NULL)) || (que->que_si == NULL)) {
5964 aprint_error_dev(dev,
5965 "could not establish software interrupts\n");
5966
5967 return ENXIO;
5968 }
5969 /* For simplicity in the handlers */
5970 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
5971
5972 return (0);
5973 } /* ixgbe_allocate_legacy */
5974
5975 /************************************************************************
5976 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
5977 ************************************************************************/
5978 static int
5979 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
5980 {
5981 device_t dev = adapter->dev;
5982 struct ix_queue *que = adapter->queues;
5983 struct tx_ring *txr = adapter->tx_rings;
5984 pci_chipset_tag_t pc;
5985 char intrbuf[PCI_INTRSTR_LEN];
5986 char intr_xname[32];
5987 char wqname[MAXCOMLEN];
5988 const char *intrstr = NULL;
5989 int error, vector = 0;
5990 int cpu_id = 0;
5991 kcpuset_t *affinity;
5992 #ifdef RSS
5993 unsigned int rss_buckets = 0;
5994 kcpuset_t cpu_mask;
5995 #endif
5996
5997 pc = adapter->osdep.pc;
5998 #ifdef RSS
5999 /*
6000 * If we're doing RSS, the number of queues needs to
6001 * match the number of RSS buckets that are configured.
6002 *
6003 * + If there's more queues than RSS buckets, we'll end
6004 * up with queues that get no traffic.
6005 *
6006 * + If there's more RSS buckets than queues, we'll end
6007 * up having multiple RSS buckets map to the same queue,
6008 * so there'll be some contention.
6009 */
6010 rss_buckets = rss_getnumbuckets();
6011 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
6012 (adapter->num_queues != rss_buckets)) {
6013 device_printf(dev,
6014 "%s: number of queues (%d) != number of RSS buckets (%d)"
6015 "; performance will be impacted.\n",
6016 __func__, adapter->num_queues, rss_buckets);
6017 }
6018 #endif
6019
6020 adapter->osdep.nintrs = adapter->num_queues + 1;
6021 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
6022 adapter->osdep.nintrs) != 0) {
6023 aprint_error_dev(dev,
6024 "failed to allocate MSI-X interrupt\n");
6025 return (ENXIO);
6026 }
6027
6028 kcpuset_create(&affinity, false);
6029 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
6030 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6031 device_xname(dev), i);
6032 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
6033 sizeof(intrbuf));
6034 #ifdef IXGBE_MPSAFE
6035 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
6036 true);
6037 #endif
6038 /* Set the handler function */
6039 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
6040 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6041 intr_xname);
6042 if (que->res == NULL) {
6043 aprint_error_dev(dev,
6044 "Failed to register QUE handler\n");
6045 error = ENXIO;
6046 goto err_out;
6047 }
6048 que->msix = vector;
6049 adapter->active_queues |= (u64)(1 << que->msix);
6050
6051 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
6052 #ifdef RSS
6053 /*
6054 * The queue ID is used as the RSS layer bucket ID.
6055 * We look up the queue ID -> RSS CPU ID and select
6056 * that.
6057 */
6058 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6059 CPU_SETOF(cpu_id, &cpu_mask);
6060 #endif
6061 } else {
6062 /*
6063 * Bind the MSI-X vector, and thus the
6064 * rings to the corresponding CPU.
6065 *
6066 * This just happens to match the default RSS
6067 * round-robin bucket -> queue -> CPU allocation.
6068 */
6069 if (adapter->num_queues > 1)
6070 cpu_id = i;
6071 }
6072 /* Round-robin affinity */
6073 kcpuset_zero(affinity);
6074 kcpuset_set(affinity, cpu_id % ncpu);
6075 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
6076 NULL);
6077 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6078 intrstr);
6079 if (error == 0) {
6080 #if 1 /* def IXGBE_DEBUG */
6081 #ifdef RSS
6082 aprintf_normal(", bound RSS bucket %d to CPU %d", i,
6083 cpu_id % ncpu);
6084 #else
6085 aprint_normal(", bound queue %d to cpu %d", i,
6086 cpu_id % ncpu);
6087 #endif
6088 #endif /* IXGBE_DEBUG */
6089 }
6090 aprint_normal("\n");
6091
6092 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6093 txr->txr_si = softint_establish(
6094 SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6095 ixgbe_deferred_mq_start, txr);
6096 if (txr->txr_si == NULL) {
6097 aprint_error_dev(dev,
6098 "couldn't establish software interrupt\n");
6099 error = ENXIO;
6100 goto err_out;
6101 }
6102 }
6103 que->que_si
6104 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6105 ixgbe_handle_que, que);
6106 if (que->que_si == NULL) {
6107 aprint_error_dev(dev,
6108 "couldn't establish software interrupt\n");
6109 error = ENXIO;
6110 goto err_out;
6111 }
6112 }
6113 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
6114 error = workqueue_create(&adapter->txr_wq, wqname,
6115 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6116 IXGBE_WORKQUEUE_FLAGS);
6117 if (error) {
6118 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
6119 goto err_out;
6120 }
6121 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6122
6123 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6124 error = workqueue_create(&adapter->que_wq, wqname,
6125 ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
6126 IXGBE_WORKQUEUE_FLAGS);
6127 if (error) {
6128 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
6129 goto err_out;
6130 }
6131
6132 /* and Link */
6133 cpu_id++;
6134 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
6135 adapter->vector = vector;
6136 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
6137 sizeof(intrbuf));
6138 #ifdef IXGBE_MPSAFE
6139 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
6140 true);
6141 #endif
6142 /* Set the link handler function */
6143 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
6144 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
6145 intr_xname);
6146 if (adapter->osdep.ihs[vector] == NULL) {
6147 adapter->res = NULL;
6148 aprint_error_dev(dev, "Failed to register LINK handler\n");
6149 error = ENXIO;
6150 goto err_out;
6151 }
6152 /* Round-robin affinity */
6153 kcpuset_zero(affinity);
6154 kcpuset_set(affinity, cpu_id % ncpu);
6155 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
6156 NULL);
6157
6158 aprint_normal_dev(dev,
6159 "for link, interrupting at %s", intrstr);
6160 if (error == 0)
6161 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
6162 else
6163 aprint_normal("\n");
6164
6165 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
6166 adapter->mbx_si =
6167 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
6168 ixgbe_handle_mbx, adapter);
6169 if (adapter->mbx_si == NULL) {
6170 aprint_error_dev(dev,
6171 "could not establish software interrupts\n");
6172
6173 error = ENXIO;
6174 goto err_out;
6175 }
6176 }
6177
6178 kcpuset_destroy(affinity);
6179 aprint_normal_dev(dev,
6180 "Using MSI-X interrupts with %d vectors\n", vector + 1);
6181
6182 return (0);
6183
6184 err_out:
6185 kcpuset_destroy(affinity);
6186 ixgbe_free_softint(adapter);
6187 ixgbe_free_pciintr_resources(adapter);
6188 return (error);
6189 } /* ixgbe_allocate_msix */
6190
/************************************************************************
 * ixgbe_configure_interrupts
 *
 * Setup MSI-X, MSI, or legacy interrupts (in that order).
 * This will also depend on user settings.
 *
 * Decides the interrupt type and queue count: sets num_queues, flips
 * the corresponding IXGBE_FEATURE_* bits in feat_en/feat_cap, and
 * updates the ixgbe_num_queues sysctl mirror. Returns 0 on success,
 * non-zero only when no supported interrupt type is available.
 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_mac_info *mac = &adapter->hw.mac;
	int want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/*
	 * NetBSD only: Use single vector MSI when number of CPU is 1 to save
	 * interrupt slot.
	 */
	if (ncpu == 1)
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	msgs = MIN(msgs, IXG_MAX_NINTR);
	/* Need at least one queue vector plus one link vector. */
	if (msgs < 2)
		goto msi;

	/* Non-NULL marks MSI-X as (provisionally) in use. */
	adapter->msix_mem = (void *)1; /* XXX */

	/* Figure out a reasonable auto config value */
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
#endif
	/* Clamp a user-requested queue count to what is achievable. */
	if (ixgbe_num_queues > queues) {
		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	else
		queues = min(queues,
		    min(mac->max_tx_queues, mac->max_rx_queues));

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev, "MSI-X Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	adapter->num_queues = queues;
	adapter->feat_en |= IXGBE_FEATURE_MSIX;
	return (0);

	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
	adapter->msix_mem = NULL; /* XXX */
	/*
	 * NOTE(review): the clamp below makes the following "!= 0" test
	 * equivalent to "msgs > 0"; only a single MSI vector is ever
	 * used.
	 */
	if (msgs > 1)
		msgs = 1;
	if (msgs != 0) {
		msgs = 1;
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		return (0);
	}

	/* No MSI either: legacy INTx is the last resort. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		aprint_error_dev(dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;

	return (0);
} /* ixgbe_configure_interrupts */
6297
6298
6299 /************************************************************************
6300 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
6301 *
6302 * Done outside of interrupt context since the driver might sleep
6303 ************************************************************************/
6304 static void
6305 ixgbe_handle_link(void *context)
6306 {
6307 struct adapter *adapter = context;
6308 struct ixgbe_hw *hw = &adapter->hw;
6309
6310 ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
6311 ixgbe_update_link_status(adapter);
6312
6313 /* Re-enable link interrupts */
6314 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
6315 } /* ixgbe_handle_link */
6316
6317 /************************************************************************
6318 * ixgbe_rearm_queues
6319 ************************************************************************/
6320 static void
6321 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6322 {
6323 u32 mask;
6324
6325 switch (adapter->hw.mac.type) {
6326 case ixgbe_mac_82598EB:
6327 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6328 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6329 break;
6330 case ixgbe_mac_82599EB:
6331 case ixgbe_mac_X540:
6332 case ixgbe_mac_X550:
6333 case ixgbe_mac_X550EM_x:
6334 case ixgbe_mac_X550EM_a:
6335 mask = (queues & 0xFFFFFFFF);
6336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6337 mask = (queues >> 32);
6338 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6339 break;
6340 default:
6341 break;
6342 }
6343 } /* ixgbe_rearm_queues */
6344