ixgbe.c revision 1.50 1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*
34 * Copyright (c) 2011 The NetBSD Foundation, Inc.
35 * All rights reserved.
36 *
37 * This code is derived from software contributed to The NetBSD Foundation
38 * by Coyote Point Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 292674 2015-12-23 22:45:17Z sbruno $*/
62 /*$NetBSD: ixgbe.c,v 1.50 2016/12/02 11:56:55 msaitoh Exp $*/
63
64 #include "opt_inet.h"
65 #include "opt_inet6.h"
66
67 #include "ixgbe.h"
68 #include "vlan.h"
69
70 #include <sys/cprng.h>
71
72 /*********************************************************************
73 * Driver version
74 *********************************************************************/
75 char ixgbe_driver_version[] = "3.1.13-k";
76
77
78 /*********************************************************************
79 * PCI Device ID Table
80 *
81 * Used by probe to select devices to load on
82 * Last field stores an index into ixgbe_strings
83 * Last entry must be all 0s
84 *
85 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
86 *********************************************************************/
87
/*
 * Matched by ixgbe_lookup(): a subvendor/subdevice value of 0 in an
 * entry acts as a wildcard.  The trailing String Index field is 0 for
 * every entry, so all supported devices share ixgbe_strings[0].
 */
88 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
89 {
90 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
91 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
92 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
93 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
94 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
95 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
96 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
122 /* required last entry */
123 {0, 0, 0, 0, 0}
124 };
125
126 /*********************************************************************
127 * Table of branding strings
128 *********************************************************************/
129
/*
 * Branding strings, selected via the String Index field of the entries
 * in ixgbe_vendor_info_array (always 0, so only one string is needed).
 */
130 static const char *ixgbe_strings[] = {
131 "Intel(R) PRO/10GbE PCI-Express Network Driver"
132 };
133
134 /*********************************************************************
135 * Function prototypes
136 *********************************************************************/
137 static int ixgbe_probe(device_t, cfdata_t, void *);
138 static void ixgbe_attach(device_t, device_t, void *);
139 static int ixgbe_detach(device_t, int);
140 #if 0
141 static int ixgbe_shutdown(device_t);
142 #endif
143 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
144 static bool ixgbe_resume(device_t, const pmf_qual_t *);
145 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
146 static void ixgbe_ifstop(struct ifnet *, int);
147 static int ixgbe_init(struct ifnet *);
148 static void ixgbe_init_locked(struct adapter *);
149 static void ixgbe_stop(void *);
150 static void ixgbe_add_media_types(struct adapter *);
151 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
152 static int ixgbe_media_change(struct ifnet *);
153 static void ixgbe_identify_hardware(struct adapter *);
154 static int ixgbe_allocate_pci_resources(struct adapter *,
155 const struct pci_attach_args *);
156 static void ixgbe_get_slot_info(struct adapter *);
157 static int ixgbe_allocate_msix(struct adapter *,
158 const struct pci_attach_args *);
159 static int ixgbe_allocate_legacy(struct adapter *,
160 const struct pci_attach_args *);
161 static int ixgbe_setup_msix(struct adapter *);
162 static void ixgbe_free_pci_resources(struct adapter *);
163 static void ixgbe_local_timer(void *);
164 static int ixgbe_setup_interface(device_t, struct adapter *);
165 static void ixgbe_config_gpie(struct adapter *);
166 static void ixgbe_config_dmac(struct adapter *);
167 static void ixgbe_config_delay_values(struct adapter *);
168 static void ixgbe_config_link(struct adapter *);
169 static void ixgbe_check_wol_support(struct adapter *);
170 static int ixgbe_setup_low_power_mode(struct adapter *);
171 static void ixgbe_rearm_queues(struct adapter *, u64);
172
173 static void ixgbe_initialize_transmit_units(struct adapter *);
174 static void ixgbe_initialize_receive_units(struct adapter *);
175 static void ixgbe_enable_rx_drop(struct adapter *);
176 static void ixgbe_disable_rx_drop(struct adapter *);
177 static void ixgbe_initialize_rss_mapping(struct adapter *);
178
179 static void ixgbe_enable_intr(struct adapter *);
180 static void ixgbe_disable_intr(struct adapter *);
181 static void ixgbe_update_stats_counters(struct adapter *);
182 static void ixgbe_set_promisc(struct adapter *);
183 static void ixgbe_set_multi(struct adapter *);
184 static void ixgbe_update_link_status(struct adapter *);
185 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
186 static void ixgbe_configure_ivars(struct adapter *);
187 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
188
189 static void ixgbe_setup_vlan_hw_support(struct adapter *);
190 #if 0
191 static void ixgbe_register_vlan(void *, struct ifnet *, u16);
192 static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
193 #endif
194
195 static void ixgbe_add_device_sysctls(struct adapter *);
196 static void ixgbe_add_hw_stats(struct adapter *);
197
198 /* Sysctl handlers */
199 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
200 const char *, int *, int);
201 static int ixgbe_set_flowcntl(SYSCTLFN_PROTO);
202 static int ixgbe_set_advertise(SYSCTLFN_PROTO);
203 static int ixgbe_sysctl_thermal_test(SYSCTLFN_PROTO);
204 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
205 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
206 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
207 #ifdef IXGBE_DEBUG
208 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
209 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
210 #endif
211 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
212 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
213 static int ixgbe_sysctl_eee_enable(SYSCTLFN_PROTO);
214 static int ixgbe_sysctl_eee_negotiated(SYSCTLFN_PROTO);
215 static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTLFN_PROTO);
216 static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTLFN_PROTO);
217 static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTLFN_PROTO);
218
219 /* Support for pluggable optic modules */
220 static bool ixgbe_sfp_probe(struct adapter *);
221 static void ixgbe_setup_optics(struct adapter *);
222
223 /* Legacy (single vector) interrupt handler */
224 static int ixgbe_legacy_irq(void *);
225
226 /* The MSI/X Interrupt handlers */
227 static int ixgbe_msix_que(void *);
228 static int ixgbe_msix_link(void *);
229
230 /* Software interrupts for deferred work */
231 static void ixgbe_handle_que(void *);
232 static void ixgbe_handle_link(void *);
233 static void ixgbe_handle_msf(void *);
234 static void ixgbe_handle_mod(void *);
235 static void ixgbe_handle_phy(void *);
236
237 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
238 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
239
240 #ifdef IXGBE_FDIR
241 static void ixgbe_reinit_fdir(void *, int);
242 #endif
243
244 #ifdef PCI_IOV
245 static void ixgbe_ping_all_vfs(struct adapter *);
246 static void ixgbe_handle_mbx(void *, int);
247 static int ixgbe_init_iov(device_t, u16, const nvlist_t *);
248 static void ixgbe_uninit_iov(device_t);
249 static int ixgbe_add_vf(device_t, u16, const nvlist_t *);
250 static void ixgbe_initialize_iov(struct adapter *);
251 static void ixgbe_recalculate_max_frame(struct adapter *);
252 static void ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
253 #endif /* PCI_IOV */
254
255
256 /*********************************************************************
257 * FreeBSD Device Interface Entry Points
258 *********************************************************************/
259
/*
 * autoconf(9) glue: binds the "ixg" driver's probe/attach/detach entry
 * points to the adapter softc.  DVF_DETACH_SHUTDOWN presumably allows
 * detaching this device at system shutdown -- confirm against
 * driver(9)/autoconf documentation.
 */
260 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
261 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
262 DVF_DETACH_SHUTDOWN);
263
/* FreeBSD module boilerplate, compiled out on NetBSD. */
264 #if 0
265 devclass_t ix_devclass;
266 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
267
268 MODULE_DEPEND(ix, pci, 1, 1, 1);
269 MODULE_DEPEND(ix, ether, 1, 1, 1);
270 #endif
271
272 /*
273 ** TUNEABLE PARAMETERS:
274 */
275
276 /*
277 ** AIM: Adaptive Interrupt Moderation
278 ** which means that the interrupt rate
279 ** is varied over time based on the
280 ** traffic for that interrupt vector
281 */
282 static int ixgbe_enable_aim = TRUE;
/*
 * NOTE(review): SYSCTL_INT() is defined away to an empty macro below,
 * so every SYSCTL_INT(...) line in this file expands to nothing: no
 * sysctl nodes are registered and these variables are effectively
 * compile-time tunables only on this port.
 */
283 #define SYSCTL_INT(__x, __y)
284 SYSCTL_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
285
286 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
287 SYSCTL_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
288
289 /* How many packets rxeof tries to clean at a time */
290 static int ixgbe_rx_process_limit = 256;
291 SYSCTL_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
292
293 /* How many packets txeof tries to clean at a time */
294 static int ixgbe_tx_process_limit = 256;
295 SYSCTL_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
296
297 /*
298 ** Smart speed setting, default to on
299 ** this only works as a compile option
300 ** right now as its during attach, set
301 ** this to 'ixgbe_smart_speed_off' to
302 ** disable.
303 */
304 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
305
306 /*
307 * MSIX should be the default for best performance,
308 * but this allows it to be forced off for testing.
309 */
310 static int ixgbe_enable_msix = 1;
311 SYSCTL_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
312
313 /*
314 * Number of Queues, can be set to 0,
315 * it then autoconfigures based on the
316 * number of cpus with a max of 8. This
317 * can be overridden manually here.
318 */
319 static int ixgbe_num_queues = 1;
320 SYSCTL_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
321
322 /*
323 ** Number of TX descriptors per ring,
324 ** setting higher than RX as this seems
325 ** the better performing choice.
326 */
327 static int ixgbe_txd = PERFORM_TXD;
328 SYSCTL_INT("hw.ixgbe.txd", &ixgbe_txd);
329
330 /* Number of RX descriptors per ring */
331 static int ixgbe_rxd = PERFORM_RXD;
332 SYSCTL_INT("hw.ixgbe.rxd", &ixgbe_rxd);
333
334 /*
335 ** Defining this on will allow the use
336 ** of unsupported SFP+ modules, note that
337 ** doing so you are on your own :)
338 */
339 static int allow_unsupported_sfp = false;
340 SYSCTL_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
341
/*
 * Incremented by ixgbe_lookup() on each successful match; consumed by
 * the nmbclusters sanity check in ixgbe_attach().
 */
342 /* Keep running tab on them for sanity check */
343 static int ixgbe_total_ports;
344
345 #ifdef IXGBE_FDIR
346 /*
347 ** Flow Director actually 'steals'
348 ** part of the packet buffer as its
349 ** filter pool, this variable controls
350 ** how much it uses:
351 ** 0 = 64K, 1 = 128K, 2 = 256K
352 */
353 static int fdir_pballoc = 1;
354 #endif
355
356 #ifdef DEV_NETMAP
357 /*
358 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
359 * be a reference on how to implement netmap support in a driver.
360 * Additional comments are in ixgbe_netmap.h .
361 *
362 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
363 * that extend the standard driver.
364 */
365 #include <dev/netmap/ixgbe_netmap.h>
366 #endif /* DEV_NETMAP */
367
368 /*********************************************************************
369 * Device identification routine
370 *
371 * ixgbe_probe determines if the driver should be loaded on
372 * adapter based on PCI vendor/device id of the adapter.
373 *
374 * return 1 on success, 0 on failure
375 *********************************************************************/
376
377 static int
378 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
379 {
380 const struct pci_attach_args *pa = aux;
381
382 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
383 }
384
/*
 * Scan ixgbe_vendor_info_array for an entry matching the PCI IDs in
 * "pa".  A table subvendor/subdevice of 0 matches anything.  Returns
 * the matching entry, or NULL if the device is not one of ours.
 */
385 static ixgbe_vendor_info_t *
386 ixgbe_lookup(const struct pci_attach_args *pa)
387 {
388 pcireg_t subid;
389 ixgbe_vendor_info_t *ent;
390
391 INIT_DEBUGOUT("ixgbe_probe: begin");
392
393 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
394 return NULL;
395
396 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
397
398 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
399 if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
400 PCI_PRODUCT(pa->pa_id) == ent->device_id &&
401
402 (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
403 ent->subvendor_id == 0) &&
404
405 (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
406 ent->subdevice_id == 0)) {
/*
 * NOTE(review): this counter bumps on every successful lookup, and
 * this function is called from both ixgbe_probe() and ixgbe_attach(),
 * so each port is counted twice.  TODO confirm whether the over-count
 * is intended where it feeds the nmbclusters check in ixgbe_attach().
 */
407 ++ixgbe_total_ports;
408 return ent;
409 }
410 }
411 return NULL;
412 }
413
414
415 /*********************************************************************
416 * Device initialization routine
417 *
418 * The attach entry point is called when the driver is being loaded.
419 * This routine identifies the type of hardware, allocates all resources
420 * and initializes the hardware.
421 *
422 * return 0 on success, positive on failure
423 *********************************************************************/
424
425 static void
426 ixgbe_attach(device_t parent, device_t dev, void *aux)
427 {
428 struct adapter *adapter;
429 struct ixgbe_hw *hw;
430 int error = -1;
431 u16 csum;
432 u32 ctrl_ext;
433 ixgbe_vendor_info_t *ent;
434 struct pci_attach_args *pa = aux;
435
436 INIT_DEBUGOUT("ixgbe_attach: begin");
437
438 /* Allocate, clear, and link in our adapter structure */
439 adapter = device_private(dev);
440 adapter->dev = dev;
441 hw = &adapter->hw;
442 adapter->osdep.pc = pa->pa_pc;
443 adapter->osdep.tag = pa->pa_tag;
444 adapter->osdep.dmat = pa->pa_dmat;
/* Detach bails out early unless this is flipped to true at the end. */
445 adapter->osdep.attached = false;
446
/* Cannot fail here: probe already matched this device via the table. */
447 ent = ixgbe_lookup(pa);
448
449 KASSERT(ent != NULL);
450
451 aprint_normal(": %s, Version - %s\n",
452 ixgbe_strings[ent->index], ixgbe_driver_version);
453
454 #ifdef DEV_NETMAP
455 adapter->init_locked = ixgbe_init_locked;
456 adapter->stop_locked = ixgbe_stop;
457 #endif
458
459 /* Core Lock Init */
460 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
461
/*
 * NOTE(review): the callout initialized here does not appear to be
 * destroyed on the err_out/err_late paths below -- TODO confirm
 * whether a callout_destroy() is needed on attach failure.
 */
462 /* Set up the timer callout */
463 callout_init(&adapter->timer, 0);
464
465 /* Determine hardware revision */
466 ixgbe_identify_hardware(adapter);
467
468 /* Do base PCI setup - map BAR0 */
469 if (ixgbe_allocate_pci_resources(adapter, pa)) {
470 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
471 error = ENXIO;
472 goto err_out;
473 }
474
475 /* Sysctls for limiting the amount of work done in the taskqueues */
476 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
477 "max number of rx packets to process",
478 &adapter->rx_process_limit, ixgbe_rx_process_limit);
479
480 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
481 "max number of tx packets to process",
482 &adapter->tx_process_limit, ixgbe_tx_process_limit);
483
/* Descriptor ring sizes must be DBA_ALIGN-aligned and within bounds. */
484 /* Do descriptor calc and sanity checks */
485 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
486 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
487 aprint_error_dev(dev, "TXD config issue, using default!\n");
488 adapter->num_tx_desc = DEFAULT_TXD;
489 } else
490 adapter->num_tx_desc = ixgbe_txd;
491
492 /*
493 ** With many RX rings it is easy to exceed the
494 ** system mbuf allocation. Tuning nmbclusters
495 ** can alleviate this.
496 */
/*
 * NOTE(review): ixgbe_total_ports is incremented by ixgbe_lookup(),
 * which runs once at probe and once above, so the estimate here is
 * presumably double the real port count -- verify.
 */
497 if (nmbclusters > 0) {
498 int s;
499 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
500 if (s > nmbclusters) {
501 aprint_error_dev(dev, "RX Descriptors exceed "
502 "system mbuf max, using default instead!\n");
503 ixgbe_rxd = DEFAULT_RXD;
504 }
505 }
506
507 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
508 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
509 aprint_error_dev(dev, "RXD config issue, using default!\n");
510 adapter->num_rx_desc = DEFAULT_RXD;
511 } else
512 adapter->num_rx_desc = ixgbe_rxd;
513
514 /* Allocate our TX/RX Queues */
515 if (ixgbe_allocate_queues(adapter)) {
516 error = ENOMEM;
517 goto err_out;
518 }
519
520 /* Allocate multicast array memory. */
521 adapter->mta = malloc(sizeof(*adapter->mta) *
522 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
523 if (adapter->mta == NULL) {
524 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
525 error = ENOMEM;
526 goto err_late;
527 }
528
529 /* Initialize the shared code */
530 hw->allow_unsupported_sfp = allow_unsupported_sfp;
531 error = ixgbe_init_shared_code(hw);
532 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
533 /*
534 ** No optics in this port, set up
535 ** so the timer routine will probe
536 ** for later insertion.
537 */
538 adapter->sfp_probe = TRUE;
/* Not fatal: continue the attach without optics. */
539 error = 0;
540 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
541 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
542 error = EIO;
543 goto err_late;
544 } else if (error) {
545 aprint_error_dev(dev, "Unable to initialize the shared code\n");
546 error = EIO;
547 goto err_late;
548 }
549
/* csum is only an output buffer for the checksum routine. */
550 /* Make sure we have a good EEPROM before we read from it */
551 if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
552 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
553 error = EIO;
554 goto err_late;
555 }
556
557 error = ixgbe_init_hw(hw);
558 switch (error) {
559 case IXGBE_ERR_EEPROM_VERSION:
560 aprint_error_dev(dev, "This device is a pre-production adapter/"
561 "LOM. Please be aware there may be issues associated "
562 "with your hardware.\nIf you are experiencing problems "
563 "please contact your Intel or hardware representative "
564 "who provided you with this hardware.\n");
565 break;
566 case IXGBE_ERR_SFP_NOT_SUPPORTED:
567 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
568 error = EIO;
569 aprint_error_dev(dev, "Hardware Initialization Failure\n");
570 goto err_late;
571 case IXGBE_ERR_SFP_NOT_PRESENT:
572 aprint_error_dev(dev, "No SFP+ Module found\n");
573 /* falls thru */
574 default:
575 break;
576 }
577
/*
 * Interrupt setup: try MSI-X first when available and enabled; the
 * -1 priming makes the legacy path the fallback in every other case.
 */
578 error = -1;
579 if ((adapter->msix > 1) && (ixgbe_enable_msix))
580 error = ixgbe_allocate_msix(adapter, pa);
581 if (error != 0)
582 error = ixgbe_allocate_legacy(adapter, pa);
583 if (error)
584 goto err_late;
585
586 /* Setup OS specific network interface */
587 if (ixgbe_setup_interface(dev, adapter) != 0)
588 goto err_late;
589
590 /* Initialize statistics */
591 ixgbe_update_stats_counters(adapter);
592
593 /* Check PCIE slot type/speed/width */
594 ixgbe_get_slot_info(adapter);
595
596 /* Set an initial default flow control & dmac value */
597 adapter->fc = ixgbe_fc_full;
598 adapter->dmac = 0;
599 adapter->eee_enabled = 0;
600
601 #ifdef PCI_IOV
602 if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
603 nvlist_t *pf_schema, *vf_schema;
604
605 hw->mbx.ops.init_params(hw);
606 pf_schema = pci_iov_schema_alloc_node();
607 vf_schema = pci_iov_schema_alloc_node();
608 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
609 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
610 IOV_SCHEMA_HASDEFAULT, TRUE);
611 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
612 IOV_SCHEMA_HASDEFAULT, FALSE);
613 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
614 IOV_SCHEMA_HASDEFAULT, FALSE);
/* SR-IOV failure is reported but does not abort the attach. */
615 error = pci_iov_attach(dev, pf_schema, vf_schema);
616 if (error != 0) {
617 device_printf(dev,
618 "Error %d setting up SR-IOV\n", error);
619 }
620 }
621 #endif /* PCI_IOV */
622
623 /* Check for certain supported features */
624 ixgbe_check_wol_support(adapter);
625
626 /* Add sysctls */
627 ixgbe_add_device_sysctls(adapter);
628 ixgbe_add_hw_stats(adapter);
629
630 /* let hardware know driver is loaded */
631 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
632 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
633 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
634
635 #ifdef DEV_NETMAP
636 ixgbe_netmap_attach(adapter);
637 #endif /* DEV_NETMAP */
638
/* Hook suspend/resume into pmf(9); failure is non-fatal. */
639 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
640 pmf_class_network_register(dev, adapter->ifp);
641 else
642 aprint_error_dev(dev, "couldn't establish power handler\n");
643
644 INIT_DEBUGOUT("ixgbe_attach: end");
645 adapter->osdep.attached = true;
646 return;
647
/* Error unwind: err_late also frees the queues; err_out does not. */
648 err_late:
649 ixgbe_free_transmit_structures(adapter);
650 ixgbe_free_receive_structures(adapter);
651 err_out:
652 if (adapter->ifp != NULL)
653 if_free(adapter->ifp);
654 ixgbe_free_pci_resources(adapter);
655 if (adapter->mta != NULL)
656 free(adapter->mta, M_DEVBUF);
657 return;
658 }
659
660 /*********************************************************************
661 * Device removal routine
662 *
663 * The detach entry point is called when the driver is being removed.
664 * This routine stops the adapter and deallocates all the resources
665 * that were allocated for driver operation.
666 *
667 * return 0 on success, positive on failure
668 *********************************************************************/
669
670 static int
671 ixgbe_detach(device_t dev, int flags)
672 {
673 struct adapter *adapter = device_private(dev);
674 struct rx_ring *rxr = adapter->rx_rings;
675 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
676 struct ix_queue *que = adapter->queues;
677 struct tx_ring *txr = adapter->tx_rings;
678 u32 ctrl_ext;
679
680 INIT_DEBUGOUT("ixgbe_detach: begin");
/* Nothing to undo if attach never completed. */
681 if (adapter->osdep.attached == false)
682 return 0;
683
684 #if NVLAN > 0
685 /* Make sure VLANs are not using driver */
686 if (!VLAN_ATTACHED(&adapter->osdep.ec))
687 ; /* nothing to do: no VLANs */
688 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
689 vlan_ifdetach(adapter->ifp);
690 else {
691 aprint_error_dev(dev, "VLANs in use\n");
692 return EBUSY;
693 }
694 #endif
695
696 #ifdef PCI_IOV
697 if (pci_iov_detach(dev) != 0) {
698 device_printf(dev, "SR-IOV in use; detach first.\n");
699 return (EBUSY);
700 }
701 #endif /* PCI_IOV */
702
703 pmf_device_deregister(dev);
704
705 ether_ifdetach(adapter->ifp);
706 /* Stop the adapter */
707 IXGBE_CORE_LOCK(adapter);
708 ixgbe_setup_low_power_mode(adapter);
709 IXGBE_CORE_UNLOCK(adapter);
710
/* Tear down the per-queue deferred-work softints. */
711 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
712 #ifndef IXGBE_LEGACY_TX
713 softint_disestablish(txr->txq_si);
714 #endif
715 softint_disestablish(que->que_si);
716 }
717
718 /* Drain the Link queue */
719 softint_disestablish(adapter->link_si);
720 softint_disestablish(adapter->mod_si);
721 softint_disestablish(adapter->msf_si);
722 #ifdef PCI_IOV
723 softint_disestablish(adapter->mbx_si);
724 #endif
725 softint_disestablish(adapter->phy_si);
726 #ifdef IXGBE_FDIR
727 softint_disestablish(adapter->fdir_si);
728 #endif
729
730 /* let hardware know driver is unloading */
731 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
732 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
733 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
734
735 callout_halt(&adapter->timer, NULL);
736 #ifdef DEV_NETMAP
737 netmap_detach(adapter->ifp);
738 #endif /* DEV_NETMAP */
739 ixgbe_free_pci_resources(adapter);
740 #if 0 /* XXX the NetBSD port is probably missing something here */
741 bus_generic_detach(dev);
742 #endif
743 if_detach(adapter->ifp);
744
/* Detach every event counter registered by ixgbe_add_hw_stats(). */
745 sysctl_teardown(&adapter->sysctllog);
746 evcnt_detach(&adapter->handleq);
747 evcnt_detach(&adapter->req);
748 evcnt_detach(&adapter->morerx);
749 evcnt_detach(&adapter->moretx);
750 evcnt_detach(&adapter->txloops);
751 evcnt_detach(&adapter->efbig_tx_dma_setup);
752 evcnt_detach(&adapter->m_defrag_failed);
753 evcnt_detach(&adapter->efbig2_tx_dma_setup);
754 evcnt_detach(&adapter->einval_tx_dma_setup);
755 evcnt_detach(&adapter->other_tx_dma_setup);
756 evcnt_detach(&adapter->eagain_tx_dma_setup);
757 evcnt_detach(&adapter->enomem_tx_dma_setup);
758 evcnt_detach(&adapter->watchdog_events);
759 evcnt_detach(&adapter->tso_err);
760 evcnt_detach(&adapter->link_irq);
761
/*
 * txr was advanced by the softint loop above; rewind it.  rxr still
 * points at the first ring and is advanced by this loop.
 */
762 txr = adapter->tx_rings;
763 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
764 evcnt_detach(&txr->no_desc_avail);
765 evcnt_detach(&txr->total_packets);
766 evcnt_detach(&txr->tso_tx);
767
/* Per-queue HW stat arrays may be shorter than num_queues; bound them. */
768 if (i < __arraycount(adapter->stats.pf.mpc)) {
769 evcnt_detach(&adapter->stats.pf.mpc[i]);
770 }
771 if (i < __arraycount(adapter->stats.pf.pxontxc)) {
772 evcnt_detach(&adapter->stats.pf.pxontxc[i]);
773 evcnt_detach(&adapter->stats.pf.pxonrxc[i]);
774 evcnt_detach(&adapter->stats.pf.pxofftxc[i]);
775 evcnt_detach(&adapter->stats.pf.pxoffrxc[i]);
776 evcnt_detach(&adapter->stats.pf.pxon2offc[i]);
777 }
778 if (i < __arraycount(adapter->stats.pf.qprc)) {
779 evcnt_detach(&adapter->stats.pf.qprc[i]);
780 evcnt_detach(&adapter->stats.pf.qptc[i]);
781 evcnt_detach(&adapter->stats.pf.qbrc[i]);
782 evcnt_detach(&adapter->stats.pf.qbtc[i]);
783 evcnt_detach(&adapter->stats.pf.qprdc[i]);
784 }
785
786 evcnt_detach(&rxr->rx_packets);
787 evcnt_detach(&rxr->rx_bytes);
788 evcnt_detach(&rxr->rx_copies);
789 evcnt_detach(&rxr->no_jmbuf);
790 evcnt_detach(&rxr->rx_discarded);
791 evcnt_detach(&rxr->rx_irq);
792 }
793 evcnt_detach(&stats->ipcs);
794 evcnt_detach(&stats->l4cs);
795 evcnt_detach(&stats->ipcs_bad);
796 evcnt_detach(&stats->l4cs_bad);
797 evcnt_detach(&stats->intzero);
798 evcnt_detach(&stats->legint);
799 evcnt_detach(&stats->crcerrs);
800 evcnt_detach(&stats->illerrc);
801 evcnt_detach(&stats->errbc);
802 evcnt_detach(&stats->mspdc);
803 evcnt_detach(&stats->mlfc);
804 evcnt_detach(&stats->mrfc);
805 evcnt_detach(&stats->rlec);
806 evcnt_detach(&stats->lxontxc);
807 evcnt_detach(&stats->lxonrxc);
808 evcnt_detach(&stats->lxofftxc);
809 evcnt_detach(&stats->lxoffrxc);
810
811 /* Packet Reception Stats */
812 evcnt_detach(&stats->tor);
813 evcnt_detach(&stats->gorc);
814 evcnt_detach(&stats->tpr);
815 evcnt_detach(&stats->gprc);
816 evcnt_detach(&stats->mprc);
817 evcnt_detach(&stats->bprc);
818 evcnt_detach(&stats->prc64);
819 evcnt_detach(&stats->prc127);
820 evcnt_detach(&stats->prc255);
821 evcnt_detach(&stats->prc511);
822 evcnt_detach(&stats->prc1023);
823 evcnt_detach(&stats->prc1522);
824 evcnt_detach(&stats->ruc);
825 evcnt_detach(&stats->rfc);
826 evcnt_detach(&stats->roc);
827 evcnt_detach(&stats->rjc);
828 evcnt_detach(&stats->mngprc);
829 evcnt_detach(&stats->xec);
830
831 /* Packet Transmission Stats */
832 evcnt_detach(&stats->gotc);
833 evcnt_detach(&stats->tpt);
834 evcnt_detach(&stats->gptc);
835 evcnt_detach(&stats->bptc);
836 evcnt_detach(&stats->mptc);
837 evcnt_detach(&stats->mngptc);
838 evcnt_detach(&stats->ptc64);
839 evcnt_detach(&stats->ptc127);
840 evcnt_detach(&stats->ptc255);
841 evcnt_detach(&stats->ptc511);
842 evcnt_detach(&stats->ptc1023);
843 evcnt_detach(&stats->ptc1522);
844
845 ixgbe_free_transmit_structures(adapter);
846 ixgbe_free_receive_structures(adapter);
847 free(adapter->mta, M_DEVBUF);
848
849 IXGBE_CORE_LOCK_DESTROY(adapter);
850 return (0);
851 }
852
853 /*********************************************************************
854 *
855 * Shutdown entry point
856 *
857 **********************************************************************/
858
859 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD shutdown entry point (compiled out on NetBSD): enters low
 * power mode under the core lock.  On this port the equivalent duty is
 * performed by the pmf(9) suspend handler registered in ixgbe_attach().
 */
860 static int
861 ixgbe_shutdown(device_t dev)
862 {
863 struct adapter *adapter = device_private(dev);
864 int error = 0;
865
866 INIT_DEBUGOUT("ixgbe_shutdown: begin");
867
868 IXGBE_CORE_LOCK(adapter);
869 error = ixgbe_setup_low_power_mode(adapter);
870 IXGBE_CORE_UNLOCK(adapter);
871
872 return (error);
873 }
874 #endif
875
876 /**
877 * Methods for going from:
878 * D0 -> D3: ixgbe_suspend
879 * D3 -> D0: ixgbe_resume
880 */
881 static bool
882 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
883 {
884 struct adapter *adapter = device_private(dev);
885 int error = 0;
886
887 INIT_DEBUGOUT("ixgbe_suspend: begin");
888
889 IXGBE_CORE_LOCK(adapter);
890
891 error = ixgbe_setup_low_power_mode(adapter);
892
893 IXGBE_CORE_UNLOCK(adapter);
894
895 return (error);
896 }
897
898 static bool
899 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
900 {
901 struct adapter *adapter = device_private(dev);
902 struct ifnet *ifp = adapter->ifp;
903 struct ixgbe_hw *hw = &adapter->hw;
904 u32 wus;
905
906 INIT_DEBUGOUT("ixgbe_resume: begin");
907
908 IXGBE_CORE_LOCK(adapter);
909
910 /* Read & clear WUS register */
911 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
912 if (wus)
913 device_printf(dev, "Woken up by (WUS): %#010x\n",
914 IXGBE_READ_REG(hw, IXGBE_WUS));
915 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
916 /* And clear WUFC until next low-power transition */
917 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
918
919 /*
920 * Required after D3->D0 transition;
921 * will re-advertise all previous advertised speeds
922 */
923 if (ifp->if_flags & IFF_UP)
924 ixgbe_init_locked(adapter);
925
926 IXGBE_CORE_UNLOCK(adapter);
927
928 return true;
929 }
930
931 static int
932 ixgbe_ifflags_cb(struct ethercom *ec)
933 {
934 struct ifnet *ifp = &ec->ec_if;
935 struct adapter *adapter = ifp->if_softc;
936 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
937
938 IXGBE_CORE_LOCK(adapter);
939
940 if (change != 0)
941 adapter->if_flags = ifp->if_flags;
942
943 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
944 rc = ENETRESET;
945 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
946 ixgbe_set_promisc(adapter);
947
948 /* Set up VLAN support and filter */
949 ixgbe_setup_vlan_hw_support(adapter);
950
951 IXGBE_CORE_UNLOCK(adapter);
952
953 return rc;
954 }
955
956 /*********************************************************************
957 * Ioctl entry point
958 *
959 * ixgbe_ioctl is called when the user wants to configure the
960 * interface.
961 *
962 * return 0 on success, positive on failure
963 **********************************************************************/
964
/*
 * Interface ioctl entry point.
 *
 * The first switch only emits debug logging for the request; the
 * second switch does the actual dispatch.  Media and I2C requests are
 * handled here directly; everything else is funneled through
 * ether_ioctl(), and when that returns ENETRESET the hardware is
 * reprogrammed accordingly (full reinit for capability/MTU changes,
 * multicast filter update for multicast list changes).
 *
 * Returns 0 on success, a positive errno on failure.
 */
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The layer-4 Rx checksum offload caps that toggle as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* Debug logging only; no dispatch happens in this switch. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	/* Actual dispatch. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		/* Read a byte from the SFP module EEPROM over I2C. */
		struct ixgbe_hw *hw = &adapter->hw;
		struct ixgbe_i2c_req i2c;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/diagnostic addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes require a full reinit. */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
}
1065
1066 /*
1067 * Set the various hardware offload abilities.
1068 *
1069 * This takes the ifnet's if_capenable flags (e.g. set by the user using
1070 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
1071 * mbuf offload flags the driver will understand.
1072 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): intentionally empty on NetBSD — the FreeBSD
	 * if_hwassist mechanism has no direct equivalent here, so this
	 * is a stub kept for source parity with the upstream driver.
	 */
}
1078
1079 /*********************************************************************
1080 * Init entry point
1081 *
1082 * This routine is used in two ways. It is used by the stack as
1083 * init entry point in network interface structure. It is also used
1084 * by the driver as a hw/sw initialization routine to get to a
1085 * consistent state.
1086 *
1087 * return 0 on success, positive on failure
1088 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

/*
 * Bring the adapter to a fully-running state: stop it, reprogram MAC
 * address/multicast filters, set up TX/RX rings and DMA engines,
 * interrupt routing, flow control, VLAN support, and finally mark the
 * interface IFF_RUNNING.  Must be called with the core mutex held.
 * Used both as the if_init backend and as an internal "reset to
 * consistent state" routine.
 */
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	int err = 0;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and stop the watchdog first. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	/*
	 * NOTE(review): RAR[0] is written twice — once above with the
	 * stale address and again below with the refreshed one; this
	 * mirrors the upstream driver, but looks redundant — confirm.
	 */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll (up to 10ms total across all rings) for enable. */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the 1Hz watchdog/statistics timer. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (hw->mac.ops.setup_eee) {
		err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
		if (err)
			device_printf(dev, "Error setting up EEE: %d\n", err);
	}

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
}
1342
1343 static int
1344 ixgbe_init(struct ifnet *ifp)
1345 {
1346 struct adapter *adapter = ifp->if_softc;
1347
1348 IXGBE_CORE_LOCK(adapter);
1349 ixgbe_init_locked(adapter);
1350 IXGBE_CORE_UNLOCK(adapter);
1351 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
1352 }
1353
1354 static void
1355 ixgbe_config_gpie(struct adapter *adapter)
1356 {
1357 struct ixgbe_hw *hw = &adapter->hw;
1358 u32 gpie;
1359
1360 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1361
1362 /* Fan Failure Interrupt */
1363 if (hw->device_id == IXGBE_DEV_ID_82598AT)
1364 gpie |= IXGBE_SDP1_GPIEN;
1365
1366 /*
1367 * Module detection (SDP2)
1368 * Media ready (SDP1)
1369 */
1370 if (hw->mac.type == ixgbe_mac_82599EB) {
1371 gpie |= IXGBE_SDP2_GPIEN;
1372 if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1373 gpie |= IXGBE_SDP1_GPIEN;
1374 }
1375
1376 /*
1377 * Thermal Failure Detection (X540)
1378 * Link Detection (X552 SFP+, X552/X557-AT)
1379 */
1380 if (hw->mac.type == ixgbe_mac_X540 ||
1381 hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1382 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1383 gpie |= IXGBE_SDP0_GPIEN_X540;
1384
1385 if (adapter->msix > 1) {
1386 /* Enable Enhanced MSIX mode */
1387 gpie |= IXGBE_GPIE_MSIX_MODE;
1388 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1389 IXGBE_GPIE_OCD;
1390 }
1391
1392 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1393 return;
1394 }
1395
1396 /*
1397 * Requires adapter->max_frame_size to be set.
1398 */
1399 static void
1400 ixgbe_config_delay_values(struct adapter *adapter)
1401 {
1402 struct ixgbe_hw *hw = &adapter->hw;
1403 u32 rxpb, frame, size, tmp;
1404
1405 frame = adapter->max_frame_size;
1406
1407 /* Calculate High Water */
1408 switch (hw->mac.type) {
1409 case ixgbe_mac_X540:
1410 case ixgbe_mac_X550:
1411 case ixgbe_mac_X550EM_x:
1412 tmp = IXGBE_DV_X540(frame, frame);
1413 break;
1414 default:
1415 tmp = IXGBE_DV(frame, frame);
1416 break;
1417 }
1418 size = IXGBE_BT2KB(tmp);
1419 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1420 hw->fc.high_water[0] = rxpb - size;
1421
1422 /* Now calculate Low Water */
1423 switch (hw->mac.type) {
1424 case ixgbe_mac_X540:
1425 case ixgbe_mac_X550:
1426 case ixgbe_mac_X550EM_x:
1427 tmp = IXGBE_LOW_DV_X540(frame);
1428 break;
1429 default:
1430 tmp = IXGBE_LOW_DV(frame);
1431 break;
1432 }
1433 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1434
1435 hw->fc.requested_mode = adapter->fc;
1436 hw->fc.pause_time = IXGBE_FC_PAUSE;
1437 hw->fc.send_xon = TRUE;
1438 }
1439
1440 /*
1441 **
1442 ** MSIX Interrupt Handlers and Tasklets
1443 **
1444 */
1445
1446 static inline void
1447 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1448 {
1449 struct ixgbe_hw *hw = &adapter->hw;
1450 u64 queue = (u64)(1ULL << vector);
1451 u32 mask;
1452
1453 if (hw->mac.type == ixgbe_mac_82598EB) {
1454 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1455 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1456 } else {
1457 mask = (queue & 0xFFFFFFFF);
1458 if (mask)
1459 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1460 mask = (queue >> 32);
1461 if (mask)
1462 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1463 }
1464 }
1465
1466 __unused static inline void
1467 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1468 {
1469 struct ixgbe_hw *hw = &adapter->hw;
1470 u64 queue = (u64)(1ULL << vector);
1471 u32 mask;
1472
1473 if (hw->mac.type == ixgbe_mac_82598EB) {
1474 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1475 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1476 } else {
1477 mask = (queue & 0xFFFFFFFF);
1478 if (mask)
1479 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1480 mask = (queue >> 32);
1481 if (mask)
1482 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1483 }
1484 }
1485
1486 static void
1487 ixgbe_handle_que(void *context)
1488 {
1489 struct ix_queue *que = context;
1490 struct adapter *adapter = que->adapter;
1491 struct tx_ring *txr = que->txr;
1492 struct ifnet *ifp = adapter->ifp;
1493
1494 adapter->handleq.ev_count++;
1495
1496 if (ifp->if_flags & IFF_RUNNING) {
1497 ixgbe_rxeof(que);
1498 IXGBE_TX_LOCK(txr);
1499 ixgbe_txeof(txr);
1500 #ifndef IXGBE_LEGACY_TX
1501 if (!drbr_empty(ifp, txr->br))
1502 ixgbe_mq_start_locked(ifp, txr);
1503 #else
1504 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1505 ixgbe_start_locked(txr, ifp);
1506 #endif
1507 IXGBE_TX_UNLOCK(txr);
1508 }
1509
1510 /* Reenable this interrupt */
1511 if (que->res != NULL)
1512 ixgbe_enable_queue(adapter, que->msix);
1513 else
1514 ixgbe_enable_intr(adapter);
1515 return;
1516 }
1517
1518
1519 /*********************************************************************
1520 *
1521 * Legacy Interrupt Service routine
1522 *
1523 **********************************************************************/
1524
/*
 * Legacy (INTx/MSI) interrupt service routine.  Handles queue work on
 * queue 0 / tx_rings[0], plus fan-failure, link-change, and external
 * PHY events.  Returns 0 when the interrupt was not ours (EICR read
 * back zero), 1 otherwise.
 */
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	u32 reg_eicr;


	/* Reading EICR acknowledges/clears the cause bits. */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (reg_eicr == 0) {
		/* Shared interrupt line, not ours. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#else
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/*
	 * Defer remaining work to softint; interrupts are re-enabled by
	 * the softint handler when the deferred path is taken.
	 * NOTE(review): with !IXGBE_LEGACY_TX the pending RX work is
	 * handed to the TX softint (txq_si) — looks intentional but
	 * confirm against the softint establishment code.
	 */
	if (more)
#ifndef IXGBE_LEGACY_TX
		softint_schedule(txr->txq_si);
#else
		softint_schedule(que->que_si);
#endif
	else
		ixgbe_enable_intr(adapter);
	return 1;
}
1595
1596
1597 /*********************************************************************
1598 *
1599 * MSIX Queue Interrupt Service routine
1600 *
1601 **********************************************************************/
/*
 * MSI-X per-queue interrupt service routine.  Masks its own vector,
 * performs/defers RX and TX completion, optionally updates the
 * Adaptive Interrupt Moderation (AIM) setting for the vector, and
 * re-arms either via the softint path or directly.
 */
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until the work is drained. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixgbe_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = larger of the TX and RX average packet sizes */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* 82598 packs the interval in the upper half; later MACs need
	 * the counter-write-disable bit instead. */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Deferred work pending: the softint re-enables the vector. */
	if (more)
		softint_schedule(que->que_si);
	else
		ixgbe_enable_queue(adapter, que->msix);
	return 1;
}
1696
1697
/*
 * MSI-X "other"/link vector service routine.  Handles all non-queue
 * causes: link status change, flow director, ECC and over-temperature
 * errors, SFP module events, fan failure, and external PHY interrupts.
 * Always claims the interrupt (returns 1).
 */
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_eicr, mod_mask;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * NOTE(review): reads EICS (the set/poll register) rather than
	 * EICR here; mirrors the upstream driver — confirm intent.
	 */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* Causes below only exist on 82599 and later. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
#ifdef PCI_IOV
		if (reg_eicr & IXGBE_EICR_MAILBOX)
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
#endif
	}

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
	else
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		/* SDP1: media/speed event; mod_mask: module insertion. */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			softint_schedule(adapter->mod_si);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
}
1788
1789 /*********************************************************************
1790 *
1791 * Media Ioctl callback
1792 *
1793 * This routine is called whenever the user queries the status of
1794 * the interface using ifconfig.
1795 *
1796 **********************************************************************/
/*
 * ifmedia status callback: report link state and, when link is up,
 * translate the adapter's physical-layer mask and negotiated speed
 * into an ifmedia active word, plus the current pause (flow control)
 * direction bits.
 */
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and stop. */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (10GBASE-T / 1000BASE-T / 100BASE-TX) */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
#ifndef IFM_ETH_XTYPE
	/* Backplane: approximate KR/KX4/KX with fiber media words. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	/* Backplane: native KR/KX4/KX media words are available. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1950
1951 /*********************************************************************
1952 *
1953 * Media Ioctl callback
1954 *
1955 * This routine is called when the user changes speed/duplex using
1956 * media/mediopt option with ifconfig.
1957 *
1958 **********************************************************************/
/*
 * ifmedia change callback: translate the requested media word into a
 * link-speed advertisement mask and hand it to the MAC's setup_link.
 *
 * The switch cases deliberately fall through: each faster media type
 * accumulates the slower speeds below it, so e.g. IFM_AUTO ends up
 * advertising 100M + 1G + 10G.
 *
 * Returns 0 on success, EINVAL for a non-ethernet or unknown media
 * word, ENODEV for backplane media (not user-selectable).
 */
static int
ixgbe_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
#ifndef IFM_ETH_XTYPE
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_10G_LRM:
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_LR:
	case IFM_10G_CX4: /* KX4 */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		/* FALLTHROUGH */
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX: /* KX */
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		goto invalid;
	}
#else
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_10G_LRM:
	case IFM_10G_KR:
	case IFM_10G_LR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		/* FALLTHROUGH */
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		/* FALLTHROUGH */
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		goto invalid;
	}
#endif

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record the advertised speeds as a 3-bit mask (10G|1G|100M). */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	return (EINVAL);
}
2047
2048 static void
2049 ixgbe_set_promisc(struct adapter *adapter)
2050 {
2051 struct ether_multi *enm;
2052 struct ether_multistep step;
2053 u_int32_t reg_rctl;
2054 struct ethercom *ec = &adapter->osdep.ec;
2055 struct ifnet *ifp = adapter->ifp;
2056 int mcnt = 0;
2057
2058 reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2059 reg_rctl &= (~IXGBE_FCTRL_UPE);
2060 if (ifp->if_flags & IFF_ALLMULTI)
2061 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2062 else {
2063 ETHER_FIRST_MULTI(step, ec, enm);
2064 while (enm != NULL) {
2065 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2066 break;
2067 mcnt++;
2068 ETHER_NEXT_MULTI(step, enm);
2069 }
2070 }
2071 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2072 reg_rctl &= (~IXGBE_FCTRL_MPE);
2073 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2074
2075 if (ifp->if_flags & IFF_PROMISC) {
2076 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2077 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2078 } else if (ifp->if_flags & IFF_ALLMULTI) {
2079 reg_rctl |= IXGBE_FCTRL_MPE;
2080 reg_rctl &= ~IXGBE_FCTRL_UPE;
2081 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
2082 }
2083 return;
2084 }
2085
2086
2087 /*********************************************************************
2088 * Multicast Update
2089 *
2090 * This routine is called whenever multicast address list is updated.
2091 *
2092 **********************************************************************/
2093 #define IXGBE_RAR_ENTRIES 16
2094
2095 static void
2096 ixgbe_set_multi(struct adapter *adapter)
2097 {
2098 u32 fctrl;
2099 u8 *update_ptr;
2100 struct ixgbe_mc_addr *mta;
2101 int mcnt = 0;
2102 struct ifnet *ifp = adapter->ifp;
2103 struct ethercom *ec = &adapter->osdep.ec;
2104 struct ether_multi *enm;
2105 struct ether_multistep step;
2106
2107 IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2108
2109 mta = adapter->mta;
2110 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2111
2112 ifp->if_flags &= ~IFF_ALLMULTI;
2113 ETHER_FIRST_MULTI(step, ec, enm);
2114 while (enm != NULL) {
2115 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
2116 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
2117 ETHER_ADDR_LEN) != 0)) {
2118 ifp->if_flags |= IFF_ALLMULTI;
2119 break;
2120 }
2121 bcopy(enm->enm_addrlo,
2122 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2123 mta[mcnt].vmdq = adapter->pool;
2124 mcnt++;
2125 ETHER_NEXT_MULTI(step, enm);
2126 }
2127
2128 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2129 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2130 if (ifp->if_flags & IFF_PROMISC)
2131 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2132 else if (ifp->if_flags & IFF_ALLMULTI) {
2133 fctrl |= IXGBE_FCTRL_MPE;
2134 }
2135
2136 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2137
2138 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2139 update_ptr = (u8 *)mta;
2140 ixgbe_update_mc_addr_list(&adapter->hw,
2141 update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
2142 }
2143
2144 return;
2145 }
2146
2147 /*
2148 * This is an iterator function now needed by the multicast
2149 * shared code. It simply feeds the shared code routine the
2150 * addresses in the array of ixgbe_set_multi() one by one.
2151 */
2152 static u8 *
2153 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2154 {
2155 struct ixgbe_mc_addr *mta;
2156
2157 mta = (struct ixgbe_mc_addr *)*update_ptr;
2158 *vmdq = mta->vmdq;
2159
2160 *update_ptr = (u8*)(mta + 1);;
2161 return (mta->addr);
2162 }
2163
2164
2165 /*********************************************************************
2166 * Timer routine
2167 *
 * This routine checks for link status, updates statistics,
2169 * and runs the watchdog check.
2170 *
2171 **********************************************************************/
2172
2173 static void
2174 ixgbe_local_timer1(void *arg)
2175 {
2176 struct adapter *adapter = arg;
2177 device_t dev = adapter->dev;
2178 struct ix_queue *que = adapter->queues;
2179 u64 queues = 0;
2180 int hung = 0;
2181
2182 KASSERT(mutex_owned(&adapter->core_mtx));
2183
2184 /* Check for pluggable optics */
2185 if (adapter->sfp_probe)
2186 if (!ixgbe_sfp_probe(adapter))
2187 goto out; /* Nothing to do */
2188
2189 ixgbe_update_link_status(adapter);
2190 ixgbe_update_stats_counters(adapter);
2191
2192 /*
2193 ** Check the TX queues status
2194 ** - mark hung queues so we don't schedule on them
2195 ** - watchdog only if all queues show hung
2196 */
2197 for (int i = 0; i < adapter->num_queues; i++, que++) {
2198 /* Keep track of queues with work for soft irq */
2199 if (que->txr->busy)
2200 queues |= ((u64)1 << que->me);
2201 /*
2202 ** Each time txeof runs without cleaning, but there
2203 ** are uncleaned descriptors it increments busy. If
2204 ** we get to the MAX we declare it hung.
2205 */
2206 if (que->busy == IXGBE_QUEUE_HUNG) {
2207 ++hung;
2208 /* Mark the queue as inactive */
2209 adapter->active_queues &= ~((u64)1 << que->me);
2210 continue;
2211 } else {
2212 /* Check if we've come back from hung */
2213 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2214 adapter->active_queues |= ((u64)1 << que->me);
2215 }
2216 if (que->busy >= IXGBE_MAX_TX_BUSY) {
2217 device_printf(dev,"Warning queue %d "
2218 "appears to be hung!\n", i);
2219 que->txr->busy = IXGBE_QUEUE_HUNG;
2220 ++hung;
2221 }
2222
2223 }
2224 /* Only truely watchdog if all queues show hung */
2225 if (hung == adapter->num_queues)
2226 goto watchdog;
2227 else if (queues != 0) { /* Force an IRQ on queues with work */
2228 ixgbe_rearm_queues(adapter, queues);
2229 }
2230
2231 out:
2232 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2233 return;
2234
2235 watchdog:
2236 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2237 adapter->ifp->if_flags &= ~IFF_RUNNING;
2238 adapter->watchdog_events.ev_count++;
2239 ixgbe_init_locked(adapter);
2240 }
2241
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Take the core lock around the real timer body. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
2251
2252
2253 /*
2254 ** Note: this routine updates the OS on the link state
2255 ** the real check of the hardware only happens with
2256 ** a link interrupt.
2257 */
2258 static void
2259 ixgbe_update_link_status(struct adapter *adapter)
2260 {
2261 struct ifnet *ifp = adapter->ifp;
2262 device_t dev = adapter->dev;
2263
2264 if (adapter->link_up){
2265 if (adapter->link_active == FALSE) {
2266 if (bootverbose)
2267 device_printf(dev,"Link is up %d Gbps %s \n",
2268 ((adapter->link_speed == 128)? 10:1),
2269 "Full Duplex");
2270 adapter->link_active = TRUE;
2271 /* Update any Flow Control changes */
2272 ixgbe_fc_enable(&adapter->hw);
2273 /* Update DMA coalescing config */
2274 ixgbe_config_dmac(adapter);
2275 if_link_state_change(ifp, LINK_STATE_UP);
2276 #ifdef PCI_IOV
2277 ixgbe_ping_all_vfs(adapter);
2278 #endif
2279 }
2280 } else { /* Link down */
2281 if (adapter->link_active == TRUE) {
2282 if (bootverbose)
2283 device_printf(dev,"Link is Down\n");
2284 if_link_state_change(ifp, LINK_STATE_DOWN);
2285 adapter->link_active = FALSE;
2286 #ifdef PCI_IOV
2287 ixgbe_ping_all_vfs(adapter);
2288 #endif
2289 }
2290 }
2291
2292 return;
2293 }
2294
2295
2296 static void
2297 ixgbe_ifstop(struct ifnet *ifp, int disable)
2298 {
2299 struct adapter *adapter = ifp->if_softc;
2300
2301 IXGBE_CORE_LOCK(adapter);
2302 ixgbe_stop(adapter);
2303 IXGBE_CORE_UNLOCK(adapter);
2304 }
2305
2306 /*********************************************************************
2307 *
2308 * This routine disables all traffic on the adapter by issuing a
2309 * global reset on the MAC and deallocates TX/RX buffers.
2310 *
2311 **********************************************************************/
2312
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Mask interrupts and cancel the periodic timer first. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before stop_adapter() so that the stop
	 * path runs in full -- presumably reset_hw() leaves the flag
	 * set; confirm against the shared-code implementation.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
}
2347
2348
2349 /*********************************************************************
2350 *
2351 * Determine hardware revision.
2352 *
2353 **********************************************************************/
2354 static void
2355 ixgbe_identify_hardware(struct adapter *adapter)
2356 {
2357 pcitag_t tag;
2358 pci_chipset_tag_t pc;
2359 pcireg_t subid, id;
2360 struct ixgbe_hw *hw = &adapter->hw;
2361
2362 pc = adapter->osdep.pc;
2363 tag = adapter->osdep.tag;
2364
2365 id = pci_conf_read(pc, tag, PCI_ID_REG);
2366 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
2367
2368 /* Save off the information about this board */
2369 hw->vendor_id = PCI_VENDOR(id);
2370 hw->device_id = PCI_PRODUCT(id);
2371 hw->revision_id =
2372 PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
2373 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
2374 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
2375
2376 /*
2377 ** Make sure BUSMASTER is set
2378 */
2379 ixgbe_pci_enable_busmaster(pc, tag);
2380
2381 /* We need this here to set the num_segs below */
2382 ixgbe_set_mac_type(hw);
2383
2384 /* Pick up the 82599 settings */
2385 if (hw->mac.type != ixgbe_mac_82598EB) {
2386 hw->phy.smart_speed = ixgbe_smart_speed;
2387 adapter->num_segs = IXGBE_82599_SCATTER;
2388 } else
2389 adapter->num_segs = IXGBE_82598_SCATTER;
2390
2391 return;
2392 }
2393
2394 /*********************************************************************
2395 *
2396 * Determine optic type
2397 *
2398 **********************************************************************/
2399 static void
2400 ixgbe_setup_optics(struct adapter *adapter)
2401 {
2402 struct ixgbe_hw *hw = &adapter->hw;
2403 int layer;
2404
2405 layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2406
2407 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2408 adapter->optics = IFM_10G_T;
2409 return;
2410 }
2411
2412 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2413 adapter->optics = IFM_1000_T;
2414 return;
2415 }
2416
2417 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2418 adapter->optics = IFM_1000_SX;
2419 return;
2420 }
2421
2422 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2423 IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2424 adapter->optics = IFM_10G_LR;
2425 return;
2426 }
2427
2428 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2429 adapter->optics = IFM_10G_SR;
2430 return;
2431 }
2432
2433 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2434 adapter->optics = IFM_10G_TWINAX;
2435 return;
2436 }
2437
2438 if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2439 IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2440 adapter->optics = IFM_10G_CX4;
2441 return;
2442 }
2443
2444 /* If we get here just set the default */
2445 adapter->optics = IFM_ETHER | IFM_AUTO;
2446 return;
2447 }
2448
2449 /*********************************************************************
2450 *
2451 * Setup the Legacy or MSI Interrupt handler
2452 *
2453 **********************************************************************/
2454 static int
2455 ixgbe_allocate_legacy(struct adapter *adapter,
2456 const struct pci_attach_args *pa)
2457 {
2458 device_t dev = adapter->dev;
2459 struct ix_queue *que = adapter->queues;
2460 #ifndef IXGBE_LEGACY_TX
2461 struct tx_ring *txr = adapter->tx_rings;
2462 #endif
2463 int counts[PCI_INTR_TYPE_SIZE];
2464 pci_intr_type_t intr_type, max_type;
2465 char intrbuf[PCI_INTRSTR_LEN];
2466 const char *intrstr = NULL;
2467
2468 /* Allocation settings */
2469 max_type = PCI_INTR_TYPE_MSI;
2470 counts[PCI_INTR_TYPE_MSIX] = 0;
2471 counts[PCI_INTR_TYPE_MSI] = 1;
2472 counts[PCI_INTR_TYPE_INTX] = 1;
2473
2474 alloc_retry:
2475 if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
2476 aprint_error_dev(dev, "couldn't alloc interrupt\n");
2477 return ENXIO;
2478 }
2479 adapter->osdep.nintrs = 1;
2480 intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
2481 intrbuf, sizeof(intrbuf));
2482 adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
2483 adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
2484 device_xname(dev));
2485 if (adapter->osdep.ihs[0] == NULL) {
2486 intr_type = pci_intr_type(adapter->osdep.pc,
2487 adapter->osdep.intrs[0]);
2488 aprint_error_dev(dev,"unable to establish %s\n",
2489 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
2490 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
2491 switch (intr_type) {
2492 case PCI_INTR_TYPE_MSI:
2493 /* The next try is for INTx: Disable MSI */
2494 max_type = PCI_INTR_TYPE_INTX;
2495 counts[PCI_INTR_TYPE_INTX] = 1;
2496 goto alloc_retry;
2497 case PCI_INTR_TYPE_INTX:
2498 default:
2499 /* See below */
2500 break;
2501 }
2502 }
2503 if (adapter->osdep.ihs[0] == NULL) {
2504 aprint_error_dev(dev,
2505 "couldn't establish interrupt%s%s\n",
2506 intrstr ? " at " : "", intrstr ? intrstr : "");
2507 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
2508 return ENXIO;
2509 }
2510 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
2511 /*
2512 * Try allocating a fast interrupt and the associated deferred
2513 * processing contexts.
2514 */
2515 #ifndef IXGBE_LEGACY_TX
2516 txr->txq_si = softint_establish(SOFTINT_NET, ixgbe_deferred_mq_start,
2517 txr);
2518 #endif
2519 que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
2520
2521 /* Tasklets for Link, SFP and Multispeed Fiber */
2522 adapter->link_si =
2523 softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
2524 adapter->mod_si =
2525 softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
2526 adapter->msf_si =
2527 softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
2528 adapter->phy_si =
2529 softint_establish(SOFTINT_NET, ixgbe_handle_phy, adapter);
2530
2531 #ifdef IXGBE_FDIR
2532 adapter->fdir_si =
2533 softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
2534 #endif
2535 if (que->que_si == NULL ||
2536 adapter->link_si == NULL ||
2537 adapter->mod_si == NULL ||
2538 #ifdef IXGBE_FDIR
2539 adapter->fdir_si == NULL ||
2540 #endif
2541 adapter->msf_si == NULL) {
2542 aprint_error_dev(dev,
2543 "could not establish software interrupts\n");
2544 return ENXIO;
2545 }
2546
2547 /* For simplicity in the handlers */
2548 adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2549
2550 return (0);
2551 }
2552
2553
2554 /*********************************************************************
2555 *
2556 * Setup MSIX Interrupt resources and handlers
2557 *
2558 **********************************************************************/
2559 static int
2560 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2561 {
2562 device_t dev = adapter->dev;
2563 struct ix_queue *que = adapter->queues;
2564 struct tx_ring *txr = adapter->tx_rings;
2565 pci_chipset_tag_t pc;
2566 char intrbuf[PCI_INTRSTR_LEN];
2567 char intr_xname[32];
2568 const char *intrstr = NULL;
2569 int error, vector = 0;
2570 int cpu_id = 0;
2571 kcpuset_t *affinity;
2572 #ifdef RSS
2573 cpuset_t cpu_mask;
2574 #endif
2575
2576 pc = adapter->osdep.pc;
2577 #ifdef RSS
2578 /*
2579 * If we're doing RSS, the number of queues needs to
2580 * match the number of RSS buckets that are configured.
2581 *
2582 * + If there's more queues than RSS buckets, we'll end
2583 * up with queues that get no traffic.
2584 *
2585 * + If there's more RSS buckets than queues, we'll end
2586 * up having multiple RSS buckets map to the same queue,
2587 * so there'll be some contention.
2588 */
2589 if (adapter->num_queues != rss_getnumbuckets()) {
2590 device_printf(dev,
2591 "%s: number of queues (%d) != number of RSS buckets (%d)"
2592 "; performance will be impacted.\n",
2593 __func__,
2594 adapter->num_queues,
2595 rss_getnumbuckets());
2596 }
2597 #endif
2598
2599 adapter->osdep.nintrs = adapter->num_queues + 1;
2600 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2601 adapter->osdep.nintrs) != 0) {
2602 aprint_error_dev(dev,
2603 "failed to allocate MSI-X interrupt\n");
2604 return (ENXIO);
2605 }
2606
2607 kcpuset_create(&affinity, false);
2608 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2609 snprintf(intr_xname, sizeof(intr_xname), "%s TX/RX",
2610 device_xname(dev));
2611 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2612 sizeof(intrbuf));
2613 #ifdef IXG_MPSAFE
2614 pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2615 true);
2616 #endif
2617 /* Set the handler function */
2618 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2619 adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
2620 intr_xname);
2621 if (que->res == NULL) {
2622 pci_intr_release(pc, adapter->osdep.intrs,
2623 adapter->osdep.nintrs);
2624 aprint_error_dev(dev,
2625 "Failed to register QUE handler\n");
2626 kcpuset_destroy(affinity);
2627 return ENXIO;
2628 }
2629 que->msix = vector;
2630 adapter->active_queues |= (u64)(1 << que->msix);
2631 #ifdef RSS
2632 /*
2633 * The queue ID is used as the RSS layer bucket ID.
2634 * We look up the queue ID -> RSS CPU ID and select
2635 * that.
2636 */
2637 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2638 #else
2639 /*
2640 * Bind the msix vector, and thus the
2641 * rings to the corresponding cpu.
2642 *
2643 * This just happens to match the default RSS round-robin
2644 * bucket -> queue -> CPU allocation.
2645 */
2646 if (adapter->num_queues > 1)
2647 cpu_id = i;
2648 #endif
2649 /* Round-robin affinity */
2650 kcpuset_zero(affinity);
2651 kcpuset_set(affinity, cpu_id % ncpu);
2652 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2653 NULL);
2654 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2655 intrstr);
2656 if (error == 0) {
2657 #ifdef IXGBE_DEBUG
2658 #ifdef RSS
2659 aprintf_normal(
2660 ", bound RSS bucket %d to CPU %d\n",
2661 i, cpu_id);
2662 #else
2663 aprint_normal(
2664 ", bound queue %d to cpu %d\n",
2665 i, cpu_id);
2666 #endif
2667 #endif /* IXGBE_DEBUG */
2668 } else
2669 aprint_normal("\n");
2670 #ifndef IXGBE_LEGACY_TX
2671 txr->txq_si = softint_establish(SOFTINT_NET,
2672 ixgbe_deferred_mq_start, txr);
2673 #endif
2674 que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que,
2675 que);
2676 if (que->que_si == NULL) {
2677 aprint_error_dev(dev,
2678 "could not establish software interrupt\n");
2679 }
2680 }
2681
2682 /* and Link */
2683 cpu_id++;
2684 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2685 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2686 sizeof(intrbuf));
2687 #ifdef IXG_MPSAFE
2688 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2689 true);
2690 #endif
2691 /* Set the link handler function */
2692 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2693 adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
2694 intr_xname);
2695 if (adapter->osdep.ihs[vector] == NULL) {
2696 adapter->res = NULL;
2697 aprint_error_dev(dev, "Failed to register LINK handler\n");
2698 kcpuset_destroy(affinity);
2699 return (ENXIO);
2700 }
2701 /* Round-robin affinity */
2702 kcpuset_zero(affinity);
2703 kcpuset_set(affinity, cpu_id % ncpu);
2704 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2705
2706 aprint_normal_dev(dev,
2707 "for link, interrupting at %s", intrstr);
2708 if (error == 0)
2709 aprint_normal(", affinity to cpu %d\n", cpu_id);
2710 else
2711 aprint_normal("\n");
2712
2713 adapter->vector = vector;
2714 /* Tasklets for Link, SFP and Multispeed Fiber */
2715 adapter->link_si =
2716 softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
2717 adapter->mod_si =
2718 softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
2719 adapter->msf_si =
2720 softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
2721 #ifdef PCI_IOV
2722 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2723 #endif
2724 adapter->phy_si =
2725 softint_establish(SOFTINT_NET, ixgbe_handle_phy, adapter);
2726 #ifdef IXGBE_FDIR
2727 adapter->fdir_si =
2728 softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
2729 #endif
2730
2731 kcpuset_destroy(affinity);
2732 return (0);
2733 }
2734
2735 /*
2736 * Setup Either MSI/X or MSI
2737 */
2738 static int
2739 ixgbe_setup_msix(struct adapter *adapter)
2740 {
2741 device_t dev = adapter->dev;
2742 int want, queues, msgs;
2743
2744 /* Override by tuneable */
2745 if (ixgbe_enable_msix == 0)
2746 goto msi;
2747
2748 /* First try MSI/X */
2749 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2750 if (msgs < IXG_MSIX_NINTR)
2751 goto msi;
2752
2753 adapter->msix_mem = (void *)1; /* XXX */
2754
2755 /* Figure out a reasonable auto config value */
2756 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2757
2758 #ifdef RSS
2759 /* If we're doing RSS, clamp at the number of RSS buckets */
2760 if (queues > rss_getnumbuckets())
2761 queues = rss_getnumbuckets();
2762 #endif
2763
2764 if (ixgbe_num_queues != 0)
2765 queues = ixgbe_num_queues;
2766 /* Set max queues to 8 when autoconfiguring */
2767 else if ((ixgbe_num_queues == 0) && (queues > 8))
2768 queues = 8;
2769
2770 /* reflect correct sysctl value */
2771 ixgbe_num_queues = queues;
2772
2773 /*
2774 ** Want one vector (RX/TX pair) per queue
2775 ** plus an additional for Link.
2776 */
2777 want = queues + 1;
2778 if (msgs >= want)
2779 msgs = want;
2780 else {
2781 aprint_error_dev(dev,
2782 "MSIX Configuration Problem, "
2783 "%d vectors but %d queues wanted!\n",
2784 msgs, want);
2785 goto msi;
2786 }
2787 device_printf(dev,
2788 "Using MSIX interrupts with %d vectors\n", msgs);
2789 adapter->num_queues = queues;
2790 return (msgs);
2791
2792 /*
2793 ** If MSIX alloc failed or provided us with
2794 ** less than needed, free and fall through to MSI
2795 */
2796 msi:
2797 msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
2798 adapter->msix_mem = NULL; /* XXX */
2799 msgs = 1;
2800 aprint_normal_dev(dev, "Using an MSI interrupt\n");
2801 return (msgs);
2802 }
2803
2804
/*
 * Map BAR0 (the device register window) and pick an interrupt
 * configuration.  Returns 0 on success, ENXIO on mapping failure.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "not mapped" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}
	adapter->hw.back = adapter;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/*
	** Now setup MSI or MSI-X, should
	** return us the number of supported
	** vectors. (Will be 1 for MSI)
	*/
	adapter->msix = ixgbe_setup_msix(adapter);
	return (0);
}
2852
2853 static void
2854 ixgbe_free_pci_resources(struct adapter * adapter)
2855 {
2856 struct ix_queue *que = adapter->queues;
2857 int rid;
2858
2859 /*
2860 ** Release all msix queue resources:
2861 */
2862 for (int i = 0; i < adapter->num_queues; i++, que++) {
2863 if (que->res != NULL)
2864 pci_intr_disestablish(adapter->osdep.pc,
2865 adapter->osdep.ihs[i]);
2866 }
2867
2868
2869 /* Clean the Legacy or Link interrupt last */
2870 if (adapter->vector) /* we are doing MSIX */
2871 rid = adapter->vector;
2872 else
2873 rid = 0;
2874
2875 if (adapter->osdep.ihs[rid] != NULL) {
2876 pci_intr_disestablish(adapter->osdep.pc,
2877 adapter->osdep.ihs[rid]);
2878 adapter->osdep.ihs[rid] = NULL;
2879 }
2880
2881 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
2882 adapter->osdep.nintrs);
2883
2884 if (adapter->osdep.mem_size != 0) {
2885 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
2886 adapter->osdep.mem_bus_space_handle,
2887 adapter->osdep.mem_size);
2888 }
2889
2890 return;
2891 }
2892
2893 /*********************************************************************
2894 *
2895 * Setup networking device structure and register an interface.
2896 *
2897 **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet is embedded in our ethercom; wire up its callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
#ifndef IXGBE_LEGACY_TX
	/* Multiqueue transmit path. */
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	/* Classic single-queue if_start path. */
	ifp->if_start = ixgbe_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
#if 0
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
#endif
	IFQ_SET_READY(&ifp->if_snd);
#endif

	/* Standard NetBSD attach sequence: initialize, attach, register. */
	if_initialize(ifp);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     | IFCAP_TXCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6
			     | IFCAP_LRO;
	/* Checksum/TSO capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixgbe driver you can
	** enable this and get full hardware tag filtering.
	*/
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
2986
/*
 * Register every ifmedia entry matching the PHY layers this adapter
 * reported (adapter->phy_layer, set by the caller).
 */
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int layer;

	layer = adapter->phy_layer;

	/* Media types with matching NetBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	/* Multispeed fiber modules can also link at 1G. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
#else
	/*
	 * Without IFM_ETH_XTYPE the backplane media have no dedicated
	 * ifmedia names; approximate them with the closest subtype and
	 * say so on the console.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
3058
3059 static void
3060 ixgbe_config_link(struct adapter *adapter)
3061 {
3062 struct ixgbe_hw *hw = &adapter->hw;
3063 u32 autoneg, err = 0;
3064 bool sfp, negotiate;
3065
3066 sfp = ixgbe_is_sfp(hw);
3067
3068 if (sfp) {
3069 void *ip;
3070
3071 if (hw->phy.multispeed_fiber) {
3072 hw->mac.ops.setup_sfp(hw);
3073 ixgbe_enable_tx_laser(hw);
3074 ip = adapter->msf_si;
3075 } else {
3076 ip = adapter->mod_si;
3077 }
3078
3079 kpreempt_disable();
3080 softint_schedule(ip);
3081 kpreempt_enable();
3082 } else {
3083 if (hw->mac.ops.check_link)
3084 err = ixgbe_check_link(hw, &adapter->link_speed,
3085 &adapter->link_up, FALSE);
3086 if (err)
3087 goto out;
3088 autoneg = hw->phy.autoneg_advertised;
3089 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3090 err = hw->mac.ops.get_link_capabilities(hw,
3091 &autoneg, &negotiate);
3092 else
3093 negotiate = 0;
3094 if (err)
3095 goto out;
3096 if (hw->mac.ops.setup_link)
3097 err = hw->mac.ops.setup_link(hw,
3098 autoneg, adapter->link_up);
3099 }
3100 out:
3101 return;
3102 }
3103
3104
3105 /*********************************************************************
3106 *
3107 * Enable transmit units.
3108 *
3109 **********************************************************************/
3110 static void
3111 ixgbe_initialize_transmit_units(struct adapter *adapter)
3112 {
3113 struct tx_ring *txr = adapter->tx_rings;
3114 struct ixgbe_hw *hw = &adapter->hw;
3115
3116 /* Setup the Base and Length of the Tx Descriptor Ring */
3117 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3118 u64 tdba = txr->txdma.dma_paddr;
3119 u32 txctrl = 0;
3120 int j = txr->me;
3121
3122 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
3123 (tdba & 0x00000000ffffffffULL));
3124 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
3125 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
3126 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
3127
3128 /* Setup the HW Tx Head and Tail descriptor pointers */
3129 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
3130 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
3131
3132 /* Cache the tail address */
3133 txr->tail = IXGBE_TDT(j);
3134
3135 /* Disable Head Writeback */
3136 /*
3137 * Note: for X550 series devices, these registers are actually
3138 * prefixed with TPH_ isntead of DCA_, but the addresses and
3139 * fields remain the same.
3140 */
3141 switch (hw->mac.type) {
3142 case ixgbe_mac_82598EB:
3143 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
3144 break;
3145 default:
3146 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
3147 break;
3148 }
3149 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3150 switch (hw->mac.type) {
3151 case ixgbe_mac_82598EB:
3152 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
3153 break;
3154 default:
3155 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
3156 break;
3157 }
3158
3159 }
3160
3161 if (hw->mac.type != ixgbe_mac_82598EB) {
3162 u32 dmatxctl, rttdcs;
3163 #ifdef PCI_IOV
3164 enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
3165 #endif
3166 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3167 dmatxctl |= IXGBE_DMATXCTL_TE;
3168 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3169 /* Disable arbiter to set MTQC */
3170 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3171 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3172 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3173 #ifdef PCI_IOV
3174 IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
3175 #else
3176 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3177 #endif
3178 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3179 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3180 }
3181
3182 return;
3183 }
3184
/*
 * Program RSS: the redirection table (RETA/ERETA), the 40-byte hash
 * key (RSSRK), and the hash-field selection in MRQC, so received
 * flows are spread across the configured queues.
 */
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
#ifdef RSS
	u32 rss_hash_config;
#endif
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_key);
#else
	/* set up random bits */
	cprng_fast(&rss_key, sizeof(rss_key));
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	/* j cycles round-robin over the queue indices. */
	for (int i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues) j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		queue_id = rss_get_indirection_to_bucket(i);
		queue_id = queue_id % adapter->num_queues;
#else
		queue_id = (j * index_mult);
#endif
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush every 4th entry; entries >= 128 live in ERETA. */
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (int i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
#ifdef RSS
	mrqc = IXGBE_MRQC_RSSEN;
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
		    "but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
#else
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	mrqc = IXGBE_MRQC_RSSEN
	     | IXGBE_MRQC_RSS_FIELD_IPV4
	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
	     | IXGBE_MRQC_RSS_FIELD_IPV6
	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
	;
#endif /* RSS */
#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	mrqc |= ixgbe_get_mrqc(mode);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
3301
3302
3303 /*********************************************************************
3304 *
3305 * Setup receive registers and features.
3306 *
3307 **********************************************************************/
3308 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3309
3310 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3311
3312 static void
3313 ixgbe_initialize_receive_units(struct adapter *adapter)
3314 {
3315 int i;
3316 struct rx_ring *rxr = adapter->rx_rings;
3317 struct ixgbe_hw *hw = &adapter->hw;
3318 struct ifnet *ifp = adapter->ifp;
3319 u32 bufsz, fctrl, srrctl, rxcsum;
3320 u32 hlreg;
3321
3322 /*
3323 * Make sure receives are disabled while
3324 * setting up the descriptor ring
3325 */
3326 ixgbe_disable_rx(hw);
3327
3328 /* Enable broadcasts */
3329 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3330 fctrl |= IXGBE_FCTRL_BAM;
3331 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3332 fctrl |= IXGBE_FCTRL_DPF;
3333 fctrl |= IXGBE_FCTRL_PMCF;
3334 }
3335 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3336
3337 /* Set for Jumbo Frames? */
3338 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3339 if (ifp->if_mtu > ETHERMTU)
3340 hlreg |= IXGBE_HLREG0_JUMBOEN;
3341 else
3342 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3343 #ifdef DEV_NETMAP
3344 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
3345 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3346 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3347 else
3348 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3349 #endif /* DEV_NETMAP */
3350 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3351
3352 bufsz = (adapter->rx_mbuf_sz +
3353 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3354
3355 for (i = 0; i < adapter->num_queues; i++, rxr++) {
3356 u64 rdba = rxr->rxdma.dma_paddr;
3357 int j = rxr->me;
3358
3359 /* Setup the Base and Length of the Rx Descriptor Ring */
3360 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3361 (rdba & 0x00000000ffffffffULL));
3362 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3363 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3364 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3365
3366 /* Set up the SRRCTL register */
3367 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3368 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3369 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3370 srrctl |= bufsz;
3371 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3372
3373 /*
3374 * Set DROP_EN iff we have no flow control and >1 queue.
3375 * Note that srrctl was cleared shortly before during reset,
3376 * so we do not need to clear the bit, but do it just in case
3377 * this code is moved elsewhere.
3378 */
3379 if (adapter->num_queues > 1 &&
3380 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3381 srrctl |= IXGBE_SRRCTL_DROP_EN;
3382 } else {
3383 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3384 }
3385
3386 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3387
3388 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3389 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3390 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3391
3392 /* Set the driver rx tail address */
3393 rxr->tail = IXGBE_RDT(rxr->me);
3394 }
3395
3396 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3397 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3398 IXGBE_PSRTYPE_UDPHDR |
3399 IXGBE_PSRTYPE_IPV4HDR |
3400 IXGBE_PSRTYPE_IPV6HDR;
3401 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3402 }
3403
3404 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3405
3406 ixgbe_initialize_rss_mapping(adapter);
3407
3408 if (adapter->num_queues > 1) {
3409 /* RSS and RX IPP Checksum are mutually exclusive */
3410 rxcsum |= IXGBE_RXCSUM_PCSD;
3411 }
3412
3413 if (ifp->if_capenable & IFCAP_RXCSUM)
3414 rxcsum |= IXGBE_RXCSUM_PCSD;
3415
3416 /* This is useful for calculating UDP/IP fragment checksums */
3417 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3418 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3419
3420 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3421
3422 return;
3423 }
3424
3425
#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/* NOTE(review): compiled out pending vlan(4) event-hook support;
 * retained as the reference implementation for the VFTA shadow table. */
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* VFTA is 128 x 32-bit words: word index from the high bits,
	 * bit position from the low 5 bits of the tag. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
#endif
3480
/*
 * Reprogram hardware VLAN support after a (soft) reset: per-queue
 * tag stripping, the VFTA filter table from our shadow copy, and
 * the global VLNCTRL filter/strip enables.
 */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Setup the queues for vlans */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		rxr->vtag_strip = TRUE;
	}

	/* Without hardware filtering there is nothing more to program. */
	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* On 82598 VLAN stripping is a global (not per-queue) enable. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
}
3532
/*
 * Build and program the interrupt-enable mask (EIMS), configure
 * MSI-X auto-clear (EIAC), and enable every queue's vector.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	/* Start from everything but the RX/TX queue bits (done below). */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		mask |= IXGBE_EIMS_GPI_SDP1;

	/* Add the MAC-specific cause bits. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
		mask |= IXGBE_EIMS_FLOW_DIR;
#endif
#ifdef PCI_IOV
		mask |= IXGBE_EIMS_MAILBOX;
#endif
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
		mask |= IXGBE_EIMS_FLOW_DIR;
#endif
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
		mask |= IXGBE_EIMS_FLOW_DIR;
#endif
#ifdef PCI_IOV
		mask |= IXGBE_EIMS_MAILBOX;
#endif
	/* falls through */
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
#ifdef PCI_IOV
		mask &= ~IXGBE_EIMS_MAILBOX;
#endif
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues, this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
3616
3617 static void
3618 ixgbe_disable_intr(struct adapter *adapter)
3619 {
3620 if (adapter->msix_mem)
3621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3622 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3624 } else {
3625 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3628 }
3629 IXGBE_WRITE_FLUSH(&adapter->hw);
3630 return;
3631 }
3632
3633 /*
3634 ** Get the width and transaction speed of
3635 ** the slot this adapter is plugged into.
3636 */
3637 static void
3638 ixgbe_get_slot_info(struct adapter *adapter)
3639 {
3640 device_t dev = adapter->dev;
3641 struct ixgbe_hw *hw = &adapter->hw;
3642 struct ixgbe_mac_info *mac = &hw->mac;
3643 u16 link;
3644
3645 /* For most devices simply call the shared code routine */
3646 if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3647 ixgbe_get_bus_info(hw);
3648 /* These devices don't use PCI-E */
3649 switch (hw->mac.type) {
3650 case ixgbe_mac_X550EM_x:
3651 return;
3652 default:
3653 goto display;
3654 }
3655 }
3656
3657 /*
3658 ** For the Quad port adapter we need to parse back
3659 ** up the PCI tree to find the speed of the expansion
3660 ** slot into which this adapter is plugged. A bit more work.
3661 */
3662 dev = device_parent(device_parent(dev));
3663 #ifdef IXGBE_DEBUG
3664 device_printf(dev, "parent pcib = %x,%x,%x\n",
3665 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3666 #endif
3667 dev = device_parent(device_parent(dev));
3668 #ifdef IXGBE_DEBUG
3669 device_printf(dev, "slot pcib = %x,%x,%x\n",
3670 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3671 #endif
3672 /* Now get the PCI Express Capabilities offset */
3673 /* ...and read the Link Status Register */
3674 link = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
3675 switch (link & IXGBE_PCI_LINK_WIDTH) {
3676 case IXGBE_PCI_LINK_WIDTH_1:
3677 hw->bus.width = ixgbe_bus_width_pcie_x1;
3678 break;
3679 case IXGBE_PCI_LINK_WIDTH_2:
3680 hw->bus.width = ixgbe_bus_width_pcie_x2;
3681 break;
3682 case IXGBE_PCI_LINK_WIDTH_4:
3683 hw->bus.width = ixgbe_bus_width_pcie_x4;
3684 break;
3685 case IXGBE_PCI_LINK_WIDTH_8:
3686 hw->bus.width = ixgbe_bus_width_pcie_x8;
3687 break;
3688 default:
3689 hw->bus.width = ixgbe_bus_width_unknown;
3690 break;
3691 }
3692
3693 switch (link & IXGBE_PCI_LINK_SPEED) {
3694 case IXGBE_PCI_LINK_SPEED_2500:
3695 hw->bus.speed = ixgbe_bus_speed_2500;
3696 break;
3697 case IXGBE_PCI_LINK_SPEED_5000:
3698 hw->bus.speed = ixgbe_bus_speed_5000;
3699 break;
3700 case IXGBE_PCI_LINK_SPEED_8000:
3701 hw->bus.speed = ixgbe_bus_speed_8000;
3702 break;
3703 default:
3704 hw->bus.speed = ixgbe_bus_speed_unknown;
3705 break;
3706 }
3707
3708 mac->ops.set_lan_id(hw);
3709
3710 display:
3711 device_printf(dev,"PCI Express Bus: Speed %s Width %s\n",
3712 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3713 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3714 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3715 (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
3716 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
3717 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
3718 ("Unknown"));
3719
3720 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3721 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3722 (hw->bus.speed == ixgbe_bus_speed_2500))) {
3723 device_printf(dev, "PCI-Express bandwidth available"
3724 " for this card\n is not sufficient for"
3725 " optimal performance.\n");
3726 device_printf(dev, "For optimal performance a x8 "
3727 "PCIE, or x4 PCIE Gen2 slot is required.\n");
3728 }
3729 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3730 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3731 (hw->bus.speed < ixgbe_bus_speed_8000))) {
3732 device_printf(dev, "PCI-Express bandwidth available"
3733 " for this card\n is not sufficient for"
3734 " optimal performance.\n");
3735 device_printf(dev, "For optimal performance a x8 "
3736 "PCIE Gen3 slot is required.\n");
3737 }
3738
3739 return;
3740 }
3741
3742
3743 /*
3744 ** Setup the correct IVAR register for a particular MSIX interrupt
3745 ** (yes this is all very magic and confusing :)
3746 ** - entry is the register array entry
3747 ** - vector is the MSIX vector for this queue
3748 ** - type is RX/TX/MISC
3749 */
3750 static void
3751 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3752 {
3753 struct ixgbe_hw *hw = &adapter->hw;
3754 u32 ivar, index;
3755
3756 vector |= IXGBE_IVAR_ALLOC_VAL;
3757
3758 switch (hw->mac.type) {
3759
3760 case ixgbe_mac_82598EB:
3761 if (type == -1)
3762 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3763 else
3764 entry += (type * 64);
3765 index = (entry >> 2) & 0x1F;
3766 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3767 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3768 ivar |= (vector << (8 * (entry & 0x3)));
3769 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3770 break;
3771
3772 case ixgbe_mac_82599EB:
3773 case ixgbe_mac_X540:
3774 case ixgbe_mac_X550:
3775 case ixgbe_mac_X550EM_x:
3776 if (type == -1) { /* MISC IVAR */
3777 index = (entry & 1) * 8;
3778 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3779 ivar &= ~(0xFF << index);
3780 ivar |= (vector << index);
3781 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3782 } else { /* RX/TX IVARS */
3783 index = (16 * (entry & 1)) + (8 * type);
3784 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3785 ivar &= ~(0xFF << index);
3786 ivar |= (vector << index);
3787 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3788 }
3789
3790 default:
3791 break;
3792 }
3793 }
3794
3795 static void
3796 ixgbe_configure_ivars(struct adapter *adapter)
3797 {
3798 struct ix_queue *que = adapter->queues;
3799 u32 newitr;
3800
3801 if (ixgbe_max_interrupt_rate > 0)
3802 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3803 else {
3804 /*
3805 ** Disable DMA coalescing if interrupt moderation is
3806 ** disabled.
3807 */
3808 adapter->dmac = 0;
3809 newitr = 0;
3810 }
3811
3812 for (int i = 0; i < adapter->num_queues; i++, que++) {
3813 struct rx_ring *rxr = &adapter->rx_rings[i];
3814 struct tx_ring *txr = &adapter->tx_rings[i];
3815 /* First the RX queue entry */
3816 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3817 /* ... and the TX */
3818 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3819 /* Set an Initial EITR value */
3820 IXGBE_WRITE_REG(&adapter->hw,
3821 IXGBE_EITR(que->msix), newitr);
3822 }
3823
3824 /* For the Link interrupt */
3825 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3826 }
3827
3828 /*
3829 ** ixgbe_sfp_probe - called in the local timer to
3830 ** determine if a port had optics inserted.
3831 */
3832 static bool
3833 ixgbe_sfp_probe(struct adapter *adapter)
3834 {
3835 struct ixgbe_hw *hw = &adapter->hw;
3836 device_t dev = adapter->dev;
3837 bool result = FALSE;
3838
3839 if ((hw->phy.type == ixgbe_phy_nl) &&
3840 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3841 s32 ret = hw->phy.ops.identify_sfp(hw);
3842 if (ret)
3843 goto out;
3844 ret = hw->phy.ops.reset(hw);
3845 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3846 device_printf(dev,"Unsupported SFP+ module detected!");
3847 device_printf(dev, "Reload driver with supported module.\n");
3848 adapter->sfp_probe = FALSE;
3849 goto out;
3850 } else
3851 device_printf(dev, "SFP+ module detected!\n");
3852 /* We now have supported optics */
3853 adapter->sfp_probe = FALSE;
3854 /* Set the optics type so system reports correctly */
3855 ixgbe_setup_optics(adapter);
3856 result = TRUE;
3857 }
3858 out:
3859 return (result);
3860 }
3861
3862 /*
3863 ** Tasklet handler for MSIX Link interrupts
3864 ** - do outside interrupt since it might sleep
3865 */
3866 static void
3867 ixgbe_handle_link(void *context)
3868 {
3869 struct adapter *adapter = context;
3870 struct ixgbe_hw *hw = &adapter->hw;
3871
3872 ixgbe_check_link(hw,
3873 &adapter->link_speed, &adapter->link_up, 0);
3874 ixgbe_update_link_status(adapter);
3875
3876 /* Re-enable link interrupts */
3877 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3878 }
3879
3880 /*
3881 ** Tasklet for handling SFP module interrupts
3882 */
3883 static void
3884 ixgbe_handle_mod(void *context)
3885 {
3886 struct adapter *adapter = context;
3887 struct ixgbe_hw *hw = &adapter->hw;
3888 device_t dev = adapter->dev;
3889 u32 err;
3890
3891 err = hw->phy.ops.identify_sfp(hw);
3892 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3893 device_printf(dev,
3894 "Unsupported SFP+ module type was detected.\n");
3895 return;
3896 }
3897
3898 err = hw->mac.ops.setup_sfp(hw);
3899 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3900 device_printf(dev,
3901 "Setup failure - unsupported SFP+ module type.\n");
3902 return;
3903 }
3904 softint_schedule(adapter->msf_si);
3905 return;
3906 }
3907
3908
3909 /*
3910 ** Tasklet for handling MSF (multispeed fiber) interrupts
3911 */
3912 static void
3913 ixgbe_handle_msf(void *context)
3914 {
3915 struct adapter *adapter = context;
3916 struct ixgbe_hw *hw = &adapter->hw;
3917 u32 autoneg;
3918 bool negotiate;
3919
3920 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3921 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3922
3923 autoneg = hw->phy.autoneg_advertised;
3924 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3925 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3926 else
3927 negotiate = 0;
3928 if (hw->mac.ops.setup_link)
3929 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3930
3931 /* Adjust media types shown in ifconfig */
3932 ifmedia_removeall(&adapter->media);
3933 ixgbe_add_media_types(adapter);
3934 return;
3935 }
3936
3937 /*
3938 ** Tasklet for handling interrupts from an external PHY
3939 */
3940 static void
3941 ixgbe_handle_phy(void *context)
3942 {
3943 struct adapter *adapter = context;
3944 struct ixgbe_hw *hw = &adapter->hw;
3945 int error;
3946
3947 error = hw->phy.ops.handle_lasi(hw);
3948 if (error == IXGBE_ERR_OVERTEMP)
3949 device_printf(adapter->dev,
3950 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3951 " PHY will downshift to lower power state!\n");
3952 else if (error)
3953 device_printf(adapter->dev,
3954 "Error handling LASI interrupt: %d\n",
3955 error);
3956 return;
3957 }
3958
#ifdef IXGBE_FDIR
/*
** Tasklet for reinitializing the Flow Director filter table.
** Scheduled when the hardware signals the table is full; rebuilds
** it, re-arms the FDIR interrupt and marks the interface running.
*/
static void
ixgbe_reinit_fdir(void *context)
{
	struct adapter *adapter = context;
	struct ifnet *ifp = adapter->ifp;

	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
		return;
	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
	adapter->fdir_reinit = 0;
	/* re-enable flow director interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	/* Restart the interface */
	ifp->if_flags |= IFF_RUNNING;
	return;
}
#endif
3980
3981 /*********************************************************************
3982 *
3983 * Configure DMA Coalescing
3984 *
3985 **********************************************************************/
3986 static void
3987 ixgbe_config_dmac(struct adapter *adapter)
3988 {
3989 struct ixgbe_hw *hw = &adapter->hw;
3990 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3991
3992 if (hw->mac.type < ixgbe_mac_X550 ||
3993 !hw->mac.ops.dmac_config)
3994 return;
3995
3996 if (dcfg->watchdog_timer ^ adapter->dmac ||
3997 dcfg->link_speed ^ adapter->link_speed) {
3998 dcfg->watchdog_timer = adapter->dmac;
3999 dcfg->fcoe_en = false;
4000 dcfg->link_speed = adapter->link_speed;
4001 dcfg->num_tcs = 1;
4002
4003 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4004 dcfg->watchdog_timer, dcfg->link_speed);
4005
4006 hw->mac.ops.dmac_config(hw);
4007 }
4008 }
4009
4010 /*
4011 * Checks whether the adapter's ports are capable of
4012 * Wake On LAN by reading the adapter's NVM.
4013 *
4014 * Sets each port's hw->wol_enabled value depending
4015 * on the value read here.
4016 */
4017 static void
4018 ixgbe_check_wol_support(struct adapter *adapter)
4019 {
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 u16 dev_caps = 0;
4022
4023 /* Find out WoL support for port */
4024 adapter->wol_support = hw->wol_enabled = 0;
4025 ixgbe_get_device_caps(hw, &dev_caps);
4026 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
4027 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
4028 hw->bus.func == 0))
4029 adapter->wol_support = hw->wol_enabled = 1;
4030
4031 /* Save initial wake up filter configuration */
4032 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
4033
4034 return;
4035 }
4036
4037 /*
4038 * Prepare the adapter/port for LPLU and/or WoL
4039 */
4040 static int
4041 ixgbe_setup_low_power_mode(struct adapter *adapter)
4042 {
4043 struct ixgbe_hw *hw = &adapter->hw;
4044 device_t dev = adapter->dev;
4045 s32 error = 0;
4046
4047 KASSERT(mutex_owned(&adapter->core_mtx));
4048
4049 /* Limit power management flow to X550EM baseT */
4050 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
4051 && hw->phy.ops.enter_lplu) {
4052 /* Turn off support for APM wakeup. (Using ACPI instead) */
4053 IXGBE_WRITE_REG(hw, IXGBE_GRC,
4054 IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
4055
4056 /*
4057 * Clear Wake Up Status register to prevent any previous wakeup
4058 * events from waking us up immediately after we suspend.
4059 */
4060 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
4061
4062 /*
4063 * Program the Wakeup Filter Control register with user filter
4064 * settings
4065 */
4066 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
4067
4068 /* Enable wakeups and power management in Wakeup Control */
4069 IXGBE_WRITE_REG(hw, IXGBE_WUC,
4070 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
4071
4072 /* X550EM baseT adapters need a special LPLU flow */
4073 hw->phy.reset_disable = true;
4074 ixgbe_stop(adapter);
4075 error = hw->phy.ops.enter_lplu(hw);
4076 if (error)
4077 device_printf(dev,
4078 "Error entering LPLU: %d\n", error);
4079 hw->phy.reset_disable = false;
4080 } else {
4081 /* Just stop for other adapters */
4082 ixgbe_stop(adapter);
4083 }
4084
4085 return error;
4086 }
4087
4088 /**********************************************************************
4089 *
4090 * Update the board statistics counters.
4091 *
4092 **********************************************************************/
4093 static void
4094 ixgbe_update_stats_counters(struct adapter *adapter)
4095 {
4096 struct ifnet *ifp = adapter->ifp;
4097 struct ixgbe_hw *hw = &adapter->hw;
4098 u32 missed_rx = 0, bprc, lxon, lxoff, total;
4099 u64 total_missed_rx = 0;
4100 uint64_t crcerrs, rlec;
4101 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4102
4103 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4104 stats->crcerrs.ev_count += crcerrs;
4105 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
4106 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
4107 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
4108
4109 for (int i = 0; i < __arraycount(stats->qprc); i++) {
4110 int j = i % adapter->num_queues;
4111 stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4112 stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4113 stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4114 }
4115 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
4116 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
4117 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
4118 stats->rlec.ev_count += rlec;
4119
4120 /* Hardware workaround, gprc counts missed packets */
4121 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
4122
4123 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4124 stats->lxontxc.ev_count += lxon;
4125 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4126 stats->lxofftxc.ev_count += lxoff;
4127 total = lxon + lxoff;
4128
4129 if (hw->mac.type != ixgbe_mac_82598EB) {
4130 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
4131 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
4132 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
4133 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
4134 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
4135 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
4136 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4137 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4138 } else {
4139 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4140 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4141 /* 82598 only has a counter in the high register */
4142 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
4143 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
4144 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
4145 }
4146
4147 /*
4148 * Workaround: mprc hardware is incorrectly counting
4149 * broadcasts, so for now we subtract those.
4150 */
4151 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4152 stats->bprc.ev_count += bprc;
4153 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
4154
4155 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
4156 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
4157 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
4158 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
4159 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4160 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4161
4162 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
4163 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
4164 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
4165
4166 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
4167 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
4168 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
4169 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
4170 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
4171 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
4172 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
4173 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
4174 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
4175 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
4176 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
4177 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
4178 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4179 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4180 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
4181 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
4182 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4183 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
4184 /* Only read FCOE on 82599 */
4185 if (hw->mac.type != ixgbe_mac_82598EB) {
4186 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4187 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4188 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4189 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4190 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4191 }
4192
4193 /* Fill out the OS statistics structure */
4194 /*
4195 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
4196 * adapter->stats counters. It's required to make ifconfig -z
4197 * (SOICZIFDATA) work.
4198 */
4199 ifp->if_collisions = 0;
4200
4201 /* Rx Errors */
4202 ifp->if_iqdrops += total_missed_rx;
4203 ifp->if_ierrors += crcerrs + rlec;
4204 }
4205
4206 /** ixgbe_sysctl_tdh_handler - Handler function
4207 * Retrieves the TDH value from the hardware
4208 */
4209 static int
4210 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
4211 {
4212 struct sysctlnode node = *rnode;
4213 uint32_t val;
4214 struct tx_ring *txr;
4215
4216 txr = (struct tx_ring *)node.sysctl_data;
4217 if (txr == NULL)
4218 return 0;
4219 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4220 node.sysctl_data = &val;
4221 return sysctl_lookup(SYSCTLFN_CALL(&node));
4222 }
4223
4224 /** ixgbe_sysctl_tdt_handler - Handler function
4225 * Retrieves the TDT value from the hardware
4226 */
4227 static int
4228 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
4229 {
4230 struct sysctlnode node = *rnode;
4231 uint32_t val;
4232 struct tx_ring *txr;
4233
4234 txr = (struct tx_ring *)node.sysctl_data;
4235 if (txr == NULL)
4236 return 0;
4237 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4238 node.sysctl_data = &val;
4239 return sysctl_lookup(SYSCTLFN_CALL(&node));
4240 }
4241
4242 /** ixgbe_sysctl_rdh_handler - Handler function
4243 * Retrieves the RDH value from the hardware
4244 */
4245 static int
4246 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
4247 {
4248 struct sysctlnode node = *rnode;
4249 uint32_t val;
4250 struct rx_ring *rxr;
4251
4252 rxr = (struct rx_ring *)node.sysctl_data;
4253 if (rxr == NULL)
4254 return 0;
4255 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4256 node.sysctl_data = &val;
4257 return sysctl_lookup(SYSCTLFN_CALL(&node));
4258 }
4259
4260 /** ixgbe_sysctl_rdt_handler - Handler function
4261 * Retrieves the RDT value from the hardware
4262 */
4263 static int
4264 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
4265 {
4266 struct sysctlnode node = *rnode;
4267 uint32_t val;
4268 struct rx_ring *rxr;
4269
4270 rxr = (struct rx_ring *)node.sysctl_data;
4271 if (rxr == NULL)
4272 return 0;
4273 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4274 node.sysctl_data = &val;
4275 return sysctl_lookup(SYSCTLFN_CALL(&node));
4276 }
4277
/*
 * Sysctl handler for the per-queue "interrupt_rate" node.  Reports the
 * current rate derived from the queue's EITR register and, on write,
 * reprograms EITR to limit interrupts to the requested rate
 * (interrupts/second).  A write of 0 removes the limitation.
 */
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que;
	uint32_t reg, usec, rate;
	int error;

	que = (struct ix_queue *)node.sysctl_data;
	if (que == NULL)
		return 0;
	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	/* EITR interval field is bits 3..11. */
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	/*
	 * NOTE(review): this updates the driver-global
	 * ixgbe_max_interrupt_rate from a per-queue sysctl, mirroring
	 * the FreeBSD driver this derives from.
	 */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		/* Clamp to a minimum of 1000 interrupts/second. */
		if (rate < 1000)
			rate = 1000;
		ixgbe_max_interrupt_rate = rate;
		reg |= ((4000000/rate) & 0xff8 );
	}
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
	return 0;
}
4310
4311 const struct sysctlnode *
4312 ixgbe_sysctl_instance(struct adapter *adapter)
4313 {
4314 const char *dvname;
4315 struct sysctllog **log;
4316 int rc;
4317 const struct sysctlnode *rnode;
4318
4319 log = &adapter->sysctllog;
4320 dvname = device_xname(adapter->dev);
4321
4322 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
4323 0, CTLTYPE_NODE, dvname,
4324 SYSCTL_DESCR("ixgbe information and settings"),
4325 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
4326 goto err;
4327
4328 return rnode;
4329 err:
4330 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
4331 return NULL;
4332 }
4333
/*
 * Register the per-device sysctl nodes under hw.<devname>: generic
 * tunables for all adapters plus conditional nodes for X550-family
 * features (DMA coalescing, EEE, Wake-on-LAN, external PHY).
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only descriptor/queue geometry. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "ts", SYSCTL_DESCR("Thermal Test"),
	    ixgbe_sysctl_thermal_test, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_STRING,
	    "print_rss_config", SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for X552 backplane devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
		const struct sysctlnode *eee_node;

		if (sysctl_createv(log, 0, &rnode, &eee_node,
		    0, CTLTYPE_NODE,
		    "eee", SYSCTL_DESCR("Energy Efficient Ethernet sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &eee_node, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "enable", SYSCTL_DESCR("Enable or Disable EEE"),
		    ixgbe_sysctl_eee_enable, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &eee_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "negotiated", SYSCTL_DESCR("EEE negotiated on link"),
		    ixgbe_sysctl_eee_negotiated, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &eee_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "tx_lpi_status", SYSCTL_DESCR("Whether or not TX link is in LPI state"),
		    ixgbe_sysctl_eee_tx_lpi_status, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &eee_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "rx_lpi_status", SYSCTL_DESCR("Whether or not RX link is in LPI state"),
		    ixgbe_sysctl_eee_rx_lpi_status, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &eee_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_BOOL,
		    "tx_lpi_delay", SYSCTL_DESCR("TX LPI entry delay in microseconds"),
		    ixgbe_sysctl_eee_tx_lpi_delay, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "wol_enable", SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "wufc", SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node,
		    0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "temp", SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "overtemp_occurred", SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
}
4494
4495 /*
4496 * Add sysctl variables, one per statistic, to the system.
4497 */
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	/*
	 * Attach all driver event counters and per-queue sysctl nodes:
	 * software-path counters on the adapter, per-queue TX/RX
	 * descriptor sysctls and evcnts, and the per-MAC hardware
	 * statistics mirrored by ixgbe_update_stats_counters().
	 */
	device_t dev = adapter->dev;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;

	/* Driver Statistics */
#if 0
	/* These counters are not updated by the software */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
			CTLFLAG_RD, &adapter->dropped_pkts,
			"Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
			CTLFLAG_RD, &adapter->mbuf_header_failed,
			"???");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
			CTLFLAG_RD, &adapter->mbuf_packet_failed,
			"???");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
			CTLFLAG_RD, &adapter->no_tx_map_avail,
			"???");
#endif
	/* Software-path counters, attached under the device name. */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Requeued in softint");
	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Interrupt handler more rx");
	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Interrupt handler more tx");
	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Interrupt handler tx loops");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
	    NULL, device_xname(dev), "Link MSIX IRQ Handled");

	/* Per-queue sysctl nodes and event counters. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    device_xname(dev), i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* XXX msaitoh */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
			NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, device_xname(dev), "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->br->br_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in buf_ring");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/*
		 * The hardware stat arrays may be shorter than
		 * num_queues; only attach counters that exist.
		 */
		if (i < __arraycount(stats->mpc)) {
			evcnt_attach_dynamic(&stats->mpc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "Missed Packet Count");
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_attach_dynamic(&stats->pxontxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "pxontxc");
			evcnt_attach_dynamic(&stats->pxonrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "pxonrxc");
			evcnt_attach_dynamic(&stats->pxofftxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "pxofftxc");
			evcnt_attach_dynamic(&stats->pxoffrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "pxoffrxc");
			evcnt_attach_dynamic(&stats->pxon2offc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "pxon2offc");
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_attach_dynamic(&stats->qprc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qprc");
			evcnt_attach_dynamic(&stats->qptc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qptc");
			evcnt_attach_dynamic(&stats->qbrc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbrc");
			evcnt_attach_dynamic(&stats->qbtc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbtc");
			evcnt_attach_dynamic(&stats->qprdc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qprdc");
		}

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
				CTLFLAG_RD, &lro->lro_queued, 0,
				"LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
				CTLFLAG_RD, &lro->lro_flushed, 0,
				"LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get the own sub node */


	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");
	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Interrupt conditions zero");
	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Legacy interrupts");
	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "CRC Errors");
	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Illegal Byte Errors");
	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Byte Errors");
	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Short Packets Discarded");
	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Local Faults");
	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Remote Faults");
	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Length Errors");
	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Transmitted");
	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Received");
	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Transmitted");
	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Received");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Octets Received");
	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Received");
	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Received");
	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Received");
	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Received");
	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames received ");
	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames received");
	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames received");
	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames received");
	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames received");
	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1023-1522 byte frames received");
	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Undersized");
	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Fragmented Packets Received ");
	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Oversized Packets Received");
	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Received Jabber");
	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Received");
	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Checksum Errors");

	/* Packet Transmission Stats */
	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Transmitted");
	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Transmitted");
	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Transmitted");
	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames transmitted ");
	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1024-1522 byte frames transmitted");
}
4804
4805 static void
4806 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4807 const char *description, int *limit, int value)
4808 {
4809 device_t dev = adapter->dev;
4810 struct sysctllog **log;
4811 const struct sysctlnode *rnode, *cnode;
4812
4813 log = &adapter->sysctllog;
4814 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
4815 aprint_error_dev(dev, "could not create sysctl root\n");
4816 return;
4817 }
4818 if (sysctl_createv(log, 0, &rnode, &cnode,
4819 CTLFLAG_READWRITE, CTLTYPE_INT,
4820 name, SYSCTL_DESCR(description),
4821 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
4822 aprint_error_dev(dev, "could not create sysctl\n");
4823 *limit = value;
4824 }
4825
4826 /*
4827 ** Set flow control using sysctl:
4828 ** Flow control values:
4829 ** 0 - off
4830 ** 1 - rx pause
4831 ** 2 - tx pause
4832 ** 3 - full
4833 */
static int
ixgbe_set_flowcntl(SYSCTLFN_ARGS)
{
	/*
	 * Sysctl handler for "fc".  Validates the new mode, adjusts the
	 * per-queue RX drop policy (drop must be enabled when flow
	 * control is off and multiple queues share the RX FIFO), then
	 * programs the MAC.  Returns 0, EINVAL for a bad mode, or a
	 * sysctl_lookup() error.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error, last;

	node.sysctl_data = &adapter->fc;
	last = adapter->fc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* newp == NULL means this was a read, not a write. */
	if (error != 0 || newp == NULL)
		return error;

	/* Don't bother if it's not changed */
	if (adapter->fc == last)
		return (0);

	switch (adapter->fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = adapter->fc;
		if (adapter->num_queues > 1)
			ixgbe_disable_rx_drop(adapter);
		break;
	case ixgbe_fc_none:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
		if (adapter->num_queues > 1)
			ixgbe_enable_rx_drop(adapter);
		break;
	default:
		/* Invalid value written: restore the previous mode. */
		adapter->fc = last;
		return (EINVAL);
	}
	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
	ixgbe_fc_enable(&adapter->hw);
	return 0;
}
4873
4874 /*
4875 ** Control advertised link speed:
4876 ** Flags:
4877 ** 0x1 - advertise 100 Mb
4878 ** 0x2 - advertise 1G
4879 ** 0x4 - advertise 10G
4880 */
4881 static int
4882 ixgbe_set_advertise(SYSCTLFN_ARGS)
4883 {
4884 struct sysctlnode node = *rnode;
4885 int old, error = 0, requested;
4886 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4887 device_t dev;
4888 struct ixgbe_hw *hw;
4889 ixgbe_link_speed speed = 0;
4890
4891 dev = adapter->dev;
4892 hw = &adapter->hw;
4893
4894 old = requested = adapter->advertise;
4895 node.sysctl_data = &requested;
4896 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4897 if (error != 0 || newp == NULL)
4898 return error;
4899
4900 /* No speed changes for backplane media */
4901 if (hw->phy.media_type == ixgbe_media_type_backplane)
4902 return (ENODEV);
4903
4904 /* Checks to validate new value */
4905 if (requested == old) /* no change */
4906 return (0);
4907
4908 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4909 (hw->phy.multispeed_fiber))) {
4910 device_printf(dev,
4911 "Advertised speed can only be set on copper or "
4912 "multispeed fiber media types.\n");
4913 return (EINVAL);
4914 }
4915
4916 if (requested < 0x1 || requested > 0x7) {
4917 device_printf(dev,
4918 "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4919 return (EINVAL);
4920 }
4921
4922 if ((requested & 0x1)
4923 && (hw->mac.type != ixgbe_mac_X540)
4924 && (hw->mac.type != ixgbe_mac_X550)) {
4925 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4926 return (EINVAL);
4927 }
4928
4929 adapter->advertise = requested;
4930
4931 /* Set new value and report new advertised mode */
4932 if (requested & 0x1)
4933 speed |= IXGBE_LINK_SPEED_100_FULL;
4934 if (requested & 0x2)
4935 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4936 if (requested & 0x4)
4937 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4938
4939 hw->mac.autotry_restart = TRUE;
4940 hw->mac.ops.setup_link(hw, speed, TRUE);
4941 adapter->advertise = requested;
4942
4943 return 0;
4944 }
4945
4946 /*
4947 * The following two sysctls are for X552/X557-AT devices;
4948 * they deal with the external PHY used in them.
4949 */
4950 static int
4951 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
4952 {
4953 struct sysctlnode node = *rnode;
4954 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4955 struct ixgbe_hw *hw = &adapter->hw;
4956 int val;
4957 u16 reg;
4958 int error;
4959
4960 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4961 device_printf(adapter->dev,
4962 "Device has no supported external thermal sensor.\n");
4963 return (ENODEV);
4964 }
4965
4966 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4967 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4968 ®)) {
4969 device_printf(adapter->dev,
4970 "Error reading from PHY's current temperature register\n");
4971 return (EAGAIN);
4972 }
4973
4974 node.sysctl_data = &val;
4975
4976 /* Shift temp for output */
4977 val = reg >> 8;
4978
4979 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4980 if ((error) || (newp == NULL))
4981 return (error);
4982
4983 return (0);
4984 }
4985
4986 /*
4987 * Reports whether the current PHY temperature is over
4988 * the overtemp threshold.
4989 * - This is reported directly from the PHY
4990 */
4991 static int
4992 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
4993 {
4994 struct sysctlnode node = *rnode;
4995 struct adapter *adapter = (struct adapter *)node.sysctl_data;
4996 struct ixgbe_hw *hw = &adapter->hw;
4997 int val, error;
4998 u16 reg;
4999
5000 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5001 device_printf(adapter->dev,
5002 "Device has no supported external thermal sensor.\n");
5003 return (ENODEV);
5004 }
5005
5006 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5007 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
5008 ®)) {
5009 device_printf(adapter->dev,
5010 "Error reading from PHY's temperature status register\n");
5011 return (EAGAIN);
5012 }
5013
5014 node.sysctl_data = &val;
5015
5016 /* Get occurrence bit */
5017 val = !!(reg & 0x4000);
5018
5019 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5020 if ((error) || (newp == NULL))
5021 return (error);
5022
5023 return (0);
5024 }
5025
5026 /*
5027 ** Thermal Shutdown Trigger (internal MAC)
5028 ** - Set this to 1 to cause an overtemp event to occur
5029 */
5030 static int
5031 ixgbe_sysctl_thermal_test(SYSCTLFN_ARGS)
5032 {
5033 struct sysctlnode node = *rnode;
5034 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5035 int error, fire = 0;
5036 struct ixgbe_hw *hw;
5037
5038 hw = &adapter->hw;
5039
5040 node.sysctl_data = &fire;
5041 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5042 if ((error) || (newp == NULL))
5043 return (error);
5044
5045 if (fire) {
5046 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
5047 reg |= IXGBE_EICR_TS;
5048 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
5049 }
5050
5051 return (0);
5052 }
5053
5054 /*
5055 ** Manage DMA Coalescing.
5056 ** Control values:
5057 ** 0/1 - off / on (use default value of 1000)
5058 **
5059 ** Legal timer values are:
5060 ** 50,100,250,500,1000,2000,5000,10000
5061 **
5062 ** Turning off interrupt moderation will also turn this off.
5063 */
5064 static int
5065 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5066 {
5067 struct sysctlnode node = *rnode;
5068 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5069 struct ifnet *ifp = adapter->ifp;
5070 int error;
5071 u16 oldval;
5072 int newval;
5073
5074 oldval = adapter->dmac;
5075 newval = oldval;
5076 node.sysctl_data = &newval;
5077 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5078 if ((error) || (newp == NULL))
5079 return (error);
5080
5081 switch (newval) {
5082 case 0:
5083 /* Disabled */
5084 adapter->dmac = 0;
5085 break;
5086 case 1:
5087 /* Enable and use default */
5088 adapter->dmac = 1000;
5089 break;
5090 case 50:
5091 case 100:
5092 case 250:
5093 case 500:
5094 case 1000:
5095 case 2000:
5096 case 5000:
5097 case 10000:
5098 /* Legal values - allow */
5099 adapter->dmac = newval;
5100 break;
5101 default:
5102 /* Do nothing, illegal value */
5103 return (EINVAL);
5104 }
5105
5106 /* Re-initialize hardware if it's already running */
5107 if (ifp->if_flags & IFF_RUNNING)
5108 ixgbe_init(ifp);
5109
5110 return (0);
5111 }
5112
#ifdef IXGBE_DEBUG
/**
 * Sysctl to test power states
 * Values:
 *	0      - set device to D0
 *	3      - set device to D3
 *	(none) - get current device power state
 */
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t dev = adapter->dev;
	int curr_ps, new_ps, error = 0;

#if notyet
	/*
	 * NOTE(review): this branch is unported FreeBSD code (req->newp,
	 * pci_get_powerstate, DEVICE_SUSPEND/DEVICE_RESUME are not NetBSD
	 * interfaces) and is compiled out by "#if notyet".  The handler is
	 * currently a no-op that always returns 0.
	 */
	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
}
#endif
5154 /*
5155 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
5156 * Values:
5157 * 0 - disabled
5158 * 1 - enabled
5159 */
5160 static int
5161 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5162 {
5163 struct sysctlnode node = *rnode;
5164 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5165 struct ixgbe_hw *hw = &adapter->hw;
5166 int new_wol_enabled;
5167 int error = 0;
5168
5169 new_wol_enabled = hw->wol_enabled;
5170 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5171 if ((error) || (newp == NULL))
5172 return (error);
5173 new_wol_enabled = !!(new_wol_enabled);
5174 if (new_wol_enabled == hw->wol_enabled)
5175 return (0);
5176
5177 if (new_wol_enabled > 0 && !adapter->wol_support)
5178 return (ENODEV);
5179 else
5180 hw->wol_enabled = new_wol_enabled;
5181
5182 return (0);
5183 }
5184
5185 /*
5186 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
5187 * if supported by the adapter.
5188 * Values:
5189 * 0 - disabled
5190 * 1 - enabled
5191 */
5192 static int
5193 ixgbe_sysctl_eee_enable(SYSCTLFN_ARGS)
5194 {
5195 struct sysctlnode node = *rnode;
5196 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5197 struct ixgbe_hw *hw = &adapter->hw;
5198 struct ifnet *ifp = adapter->ifp;
5199 int new_eee_enabled, error = 0;
5200
5201 new_eee_enabled = adapter->eee_enabled;
5202 node.sysctl_data = &new_eee_enabled;
5203 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5204 if ((error) || (newp == NULL))
5205 return (error);
5206 new_eee_enabled = !!(new_eee_enabled);
5207 if (new_eee_enabled == adapter->eee_enabled)
5208 return (0);
5209
5210 if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
5211 return (ENODEV);
5212 else
5213 adapter->eee_enabled = new_eee_enabled;
5214
5215 /* Re-initialize hardware if it's already running */
5216 if (ifp->if_flags & IFF_RUNNING)
5217 ixgbe_init(ifp);
5218
5219 return (0);
5220 }
5221
5222 /*
5223 * Read-only sysctl indicating whether EEE support was negotiated
5224 * on the link.
5225 */
5226 static int
5227 ixgbe_sysctl_eee_negotiated(SYSCTLFN_ARGS)
5228 {
5229 struct sysctlnode node = *rnode;
5230 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5231 struct ixgbe_hw *hw = &adapter->hw;
5232 bool status;
5233
5234 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
5235
5236 node.sysctl_data = &status;
5237 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
5238 }
5239
5240 /*
5241 * Read-only sysctl indicating whether RX Link is in LPI state.
5242 */
5243 static int
5244 ixgbe_sysctl_eee_rx_lpi_status(SYSCTLFN_ARGS)
5245 {
5246 struct sysctlnode node = *rnode;
5247 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5248 struct ixgbe_hw *hw = &adapter->hw;
5249 bool status;
5250
5251 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5252 IXGBE_EEE_RX_LPI_STATUS);
5253
5254 node.sysctl_data = &status;
5255 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
5256 }
5257
5258 /*
5259 * Read-only sysctl indicating whether TX Link is in LPI state.
5260 */
5261 static int
5262 ixgbe_sysctl_eee_tx_lpi_status(SYSCTLFN_ARGS)
5263 {
5264 struct sysctlnode node = *rnode;
5265 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5266 struct ixgbe_hw *hw = &adapter->hw;
5267 bool status;
5268
5269 status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
5270 IXGBE_EEE_TX_LPI_STATUS);
5271
5272 node.sysctl_data = &status;
5273 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
5274 }
5275
5276 /*
5277 * Read-only sysctl indicating TX Link LPI delay
5278 */
5279 static int
5280 ixgbe_sysctl_eee_tx_lpi_delay(SYSCTLFN_ARGS)
5281 {
5282 struct sysctlnode node = *rnode;
5283 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5284 struct ixgbe_hw *hw = &adapter->hw;
5285 u32 reg;
5286
5287 reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
5288
5289 reg >>= 26;
5290 node.sysctl_data = ®
5291 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
5292 }
5293
5294 /*
5295 * Sysctl to enable/disable the types of packets that the
5296 * adapter will wake up on upon receipt.
5297 * WUFC - Wake Up Filter Control
5298 * Flags:
5299 * 0x1 - Link Status Change
5300 * 0x2 - Magic Packet
5301 * 0x4 - Direct Exact
5302 * 0x8 - Directed Multicast
5303 * 0x10 - Broadcast
5304 * 0x20 - ARP/IPv4 Request Packet
5305 * 0x40 - Direct IPv4 Packet
5306 * 0x80 - Direct IPv6 Packet
5307 *
5308 * Setting another flag will cause the sysctl to return an
5309 * error.
5310 */
5311 static int
5312 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5313 {
5314 struct sysctlnode node = *rnode;
5315 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5316 int error = 0;
5317 u32 new_wufc;
5318
5319 new_wufc = adapter->wufc;
5320
5321 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5322 if ((error) || (newp == NULL))
5323 return (error);
5324 if (new_wufc == adapter->wufc)
5325 return (0);
5326
5327 if (new_wufc & 0xffffff00)
5328 return (EINVAL);
5329 else {
5330 new_wufc &= 0xff;
5331 new_wufc |= (0xffffff & adapter->wufc);
5332 adapter->wufc = new_wufc;
5333 }
5334
5335 return (0);
5336 }
5337
#ifdef IXGBE_DEBUG
/*
 * Debug sysctl that dumps the RSS redirection table (RETA/ERETA).
 *
 * NOTE(review): this function still uses FreeBSD's sbuf/"req" sysctl
 * interfaces and only compiles under IXGBE_DEBUG; it needs a proper
 * NetBSD port.  The missing declaration of "node" (used on the next
 * line) has been restored so the function at least parses.
 */
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int error = 0, reta_size;
	struct sbuf *buf;
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (0);
}
#endif /* IXGBE_DEBUG */
5389
5390 /*
5391 ** Enable the hardware to drop packets when the buffer is
5392 ** full. This is useful when multiqueue,so that no single
5393 ** queue being full stalls the entire RX engine. We only
5394 ** enable this when Multiqueue AND when Flow Control is
5395 ** disabled.
5396 */
5397 static void
5398 ixgbe_enable_rx_drop(struct adapter *adapter)
5399 {
5400 struct ixgbe_hw *hw = &adapter->hw;
5401
5402 for (int i = 0; i < adapter->num_queues; i++) {
5403 struct rx_ring *rxr = &adapter->rx_rings[i];
5404 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5405 srrctl |= IXGBE_SRRCTL_DROP_EN;
5406 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5407 }
5408 #ifdef PCI_IOV
5409 /* enable drop for each vf */
5410 for (int i = 0; i < adapter->num_vfs; i++) {
5411 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5412 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5413 IXGBE_QDE_ENABLE));
5414 }
5415 #endif
5416 }
5417
5418 static void
5419 ixgbe_disable_rx_drop(struct adapter *adapter)
5420 {
5421 struct ixgbe_hw *hw = &adapter->hw;
5422
5423 for (int i = 0; i < adapter->num_queues; i++) {
5424 struct rx_ring *rxr = &adapter->rx_rings[i];
5425 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5426 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5427 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5428 }
5429 #ifdef PCI_IOV
5430 /* disable drop for each vf */
5431 for (int i = 0; i < adapter->num_vfs; i++) {
5432 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5433 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5434 }
5435 #endif
5436 }
5437
5438 #ifdef PCI_IOV
5439
5440 /*
5441 ** Support functions for SRIOV/VF management
5442 */
5443
5444 static void
5445 ixgbe_ping_all_vfs(struct adapter *adapter)
5446 {
5447 struct ixgbe_vf *vf;
5448
5449 for (int i = 0; i < adapter->num_vfs; i++) {
5450 vf = &adapter->vfs[i];
5451 if (vf->flags & IXGBE_VF_ACTIVE)
5452 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
5453 }
5454 }
5455
5456
/*
** Program the default VLAN (port VLAN) for a VF pool: configures the
** pool's VMOLR filtering bits and the VMVIR default-tag insertion.
** A tag of 0 means "no default VLAN".
*/
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promicuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
}
5495
5496
5497 static boolean_t
5498 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5499 {
5500
5501 /*
5502 * Frame size compatibility between PF and VF is only a problem on
5503 * 82599-based cards. X540 and later support any combination of jumbo
5504 * frames on PFs and VFs.
5505 */
5506 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5507 return (TRUE);
5508
5509 switch (vf->api_ver) {
5510 case IXGBE_API_VER_1_0:
5511 case IXGBE_API_VER_UNKNOWN:
5512 /*
5513 * On legacy (1.0 and older) VF versions, we don't support jumbo
5514 * frames on either the PF or the VF.
5515 */
5516 if (adapter->max_frame_size > ETHER_MAX_LEN ||
5517 vf->max_frame_size > ETHER_MAX_LEN)
5518 return (FALSE);
5519
5520 return (TRUE);
5521
5522 break;
5523 case IXGBE_API_VER_1_1:
5524 default:
5525 /*
5526 * 1.1 or later VF versions always work if they aren't using
5527 * jumbo frames.
5528 */
5529 if (vf->max_frame_size <= ETHER_MAX_LEN)
5530 return (TRUE);
5531
5532 /*
5533 * Jumbo frames only work with VFs if the PF is also using jumbo
5534 * frames.
5535 */
5536 if (adapter->max_frame_size <= ETHER_MAX_LEN)
5537 return (TRUE);
5538
5539 return (FALSE);
5540
5541 }
5542 }
5543
5544
/*
** Reset a VF's PF-side state: reinstate its default VLAN, drop its
** MAC filter, and forget the negotiated mailbox API version.
*/
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	/* Remove the VF's receive-address register entry. */
	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	/* API version must be re-negotiated after a reset. */
	vf->api_ver = IXGBE_API_VER_UNKNOWN;
}
5556
5557
5558 static void
5559 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5560 {
5561 struct ixgbe_hw *hw;
5562 uint32_t vf_index, vfte;
5563
5564 hw = &adapter->hw;
5565
5566 vf_index = IXGBE_VF_INDEX(vf->pool);
5567 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5568 vfte |= IXGBE_VF_BIT(vf->pool);
5569 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
5570 }
5571
5572
5573 static void
5574 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5575 {
5576 struct ixgbe_hw *hw;
5577 uint32_t vf_index, vfre;
5578
5579 hw = &adapter->hw;
5580
5581 vf_index = IXGBE_VF_INDEX(vf->pool);
5582 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5583 if (ixgbe_vf_frame_size_compatible(adapter, vf))
5584 vfre |= IXGBE_VF_BIT(vf->pool);
5585 else
5586 vfre &= ~IXGBE_VF_BIT(vf->pool);
5587 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
5588 }
5589
5590
/*
** Handle a VF RESET mailbox message: reset the VF's PF-side state,
** re-install its MAC filter if it has a valid address, re-enable
** TX/RX, grant clear-to-send, and reply with the permanent MAC
** address and multicast filter type.
*/
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	/* ACK only if the VF has a valid MAC to re-install. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	/* From now on, non-reset messages from this VF are accepted. */
	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
}
5619
5620
/*
** Handle a VF SET_MAC_ADDR mailbox message: validate permission and
** the address itself, then install it in the VF's RAR entry.
*/
static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	/* The requested MAC address follows the message header. */
	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
	    vf->pool, TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}
5646
5647
5648 /*
5649 ** VF multicast addresses are set by using the appropriate bit in
5650 ** 1 of 128 32 bit addresses (4096 possible).
5651 */
5652 static void
5653 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5654 {
5655 u16 *list = (u16*)&msg[1];
5656 int entries;
5657 u32 vmolr, vec_bit, vec_reg, mta_reg;
5658
5659 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5660 entries = min(entries, IXGBE_MAX_VF_MC);
5661
5662 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5663
5664 vf->num_mc_hashes = entries;
5665
5666 /* Set the appropriate MTA bit */
5667 for (int i = 0; i < entries; i++) {
5668 vf->mc_hash[i] = list[i];
5669 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5670 vec_bit = vf->mc_hash[i] & 0x1F;
5671 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5672 mta_reg |= (1 << vec_bit);
5673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5674 }
5675
5676 vmolr |= IXGBE_VMOLR_ROMPE;
5677 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5678 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5679 return;
5680 }
5681
5682
/*
** Handle a VF SET_VLAN mailbox message: add or remove a VLAN filter
** for the VF's pool, subject to the VF's VLAN capability flag.
*/
static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	/* Header info field says add (non-zero) or remove (zero). */
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0){
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}
5708
5709
/*
** Handle a VF SET_LPE (large packet enable / max frame size) message.
** msg[1] carries the VF's requested max frame size including CRC.
** Out-of-range requests are deliberately ACKed without effect.
*/
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	/* Grow the PF's MHADD max-frame field if it is now too small. */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}
5753
5754
5755 static void
5756 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
5757 uint32_t *msg)
5758 {
5759 //XXX implement this
5760 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5761 }
5762
5763
5764 static void
5765 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5766 uint32_t *msg)
5767 {
5768
5769 switch (msg[1]) {
5770 case IXGBE_API_VER_1_0:
5771 case IXGBE_API_VER_1_1:
5772 vf->api_ver = msg[1];
5773 ixgbe_send_vf_ack(adapter, vf, msg[0]);
5774 break;
5775 default:
5776 vf->api_ver = IXGBE_API_VER_UNKNOWN;
5777 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5778 break;
5779 }
5780 }
5781
5782
5783 static void
5784 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5785 uint32_t *msg)
5786 {
5787 struct ixgbe_hw *hw;
5788 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5789 int num_queues;
5790
5791 hw = &adapter->hw;
5792
5793 /* GET_QUEUES is not supported on pre-1.1 APIs. */
5794 switch (msg[0]) {
5795 case IXGBE_API_VER_1_0:
5796 case IXGBE_API_VER_UNKNOWN:
5797 ixgbe_send_vf_nack(adapter, vf, msg[0]);
5798 return;
5799 }
5800
5801 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5802 IXGBE_VT_MSGTYPE_CTS;
5803
5804 num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5805 resp[IXGBE_VF_TX_QUEUES] = num_queues;
5806 resp[IXGBE_VF_RX_QUEUES] = num_queues;
5807 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5808 resp[IXGBE_VF_DEF_QUEUE] = 0;
5809
5810 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
5811 }
5812
5813
/*
** Read one mailbox message from a VF and dispatch it to the
** appropriate handler.  Until the VF has been granted CTS (by a
** successful reset), every message except RESET is NACKed.
*/
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	/* No clear-to-send yet: only a reset is acceptable. */
	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* Dispatch on the message type bits of the header word. */
	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
}
5866
5867
5868 /*
5869 * Tasklet for handling VF -> PF mailbox messages.
5870 */
5871 static void
5872 ixgbe_handle_mbx(void *context, int pending)
5873 {
5874 struct adapter *adapter;
5875 struct ixgbe_hw *hw;
5876 struct ixgbe_vf *vf;
5877 int i;
5878
5879 adapter = context;
5880 hw = &adapter->hw;
5881
5882 IXGBE_CORE_LOCK(adapter);
5883 for (i = 0; i < adapter->num_vfs; i++) {
5884 vf = &adapter->vfs[i];
5885
5886 if (vf->flags & IXGBE_VF_ACTIVE) {
5887 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5888 ixgbe_process_vf_reset(adapter, vf);
5889
5890 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5891 ixgbe_process_vf_msg(adapter, vf);
5892
5893 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5894 ixgbe_process_vf_ack(adapter, vf);
5895 }
5896 }
5897 IXGBE_CORE_UNLOCK(adapter);
5898 }
5899
5900
/*
** Driver entry point for enabling SR-IOV: allocate per-VF state for
** num_vfs VFs and reinitialize the adapter in the new IOV mode.
** Returns ENOSPC if the requested count exceeds what the chosen IOV
** mode supports, ENOMEM on allocation failure.
*/
static int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	enum ixgbe_iov_mode mode;

	adapter = device_get_softc(dev);
	adapter->num_vfs = num_vfs;
	/* Mode selection depends on num_vfs, so set it first. */
	mode = ixgbe_get_iov_mode(adapter);

	if (num_vfs > ixgbe_max_vfs(mode)) {
		adapter->num_vfs = 0;
		return (ENOSPC);
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		adapter->num_vfs = 0;
		IXGBE_CORE_UNLOCK(adapter);
		return (ENOMEM);
	}

	/* Re-init applies the new IOV queue/pool layout to the hardware. */
	ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (0);
}
5933
5934
/*
** Tear down SR-IOV: re-enable rx/tx for the PF's own pool, clear the
** enable bits for all other pools, disable virtualization, and free
** the per-VF state.
*/
static void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
	    IXGBE_VF_BIT(adapter->pool));

	/* VFRE/VFTE come in pairs; zero whichever one the PF isn't in. */
	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	/* Turn off the virtualization engine entirely. */
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;

	IXGBE_CORE_UNLOCK(adapter);
}
5969
5970
5971 static void
5972 ixgbe_initialize_iov(struct adapter *adapter)
5973 {
5974 struct ixgbe_hw *hw = &adapter->hw;
5975 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5976 enum ixgbe_iov_mode mode;
5977 int i;
5978
5979 mode = ixgbe_get_iov_mode(adapter);
5980 if (mode == IXGBE_NO_VM)
5981 return;
5982
5983 IXGBE_CORE_LOCK_ASSERT(adapter);
5984
5985 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5986 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5987
5988 switch (mode) {
5989 case IXGBE_64_VM:
5990 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5991 break;
5992 case IXGBE_32_VM:
5993 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5994 break;
5995 default:
5996 panic("Unexpected SR-IOV mode %d", mode);
5997 }
5998 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5999
6000 mtqc = IXGBE_MTQC_VT_ENA;
6001 switch (mode) {
6002 case IXGBE_64_VM:
6003 mtqc |= IXGBE_MTQC_64VF;
6004 break;
6005 case IXGBE_32_VM:
6006 mtqc |= IXGBE_MTQC_32VF;
6007 break;
6008 default:
6009 panic("Unexpected SR-IOV mode %d", mode);
6010 }
6011 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
6012
6013
6014 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
6015 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
6016 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
6017 switch (mode) {
6018 case IXGBE_64_VM:
6019 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
6020 break;
6021 case IXGBE_32_VM:
6022 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
6023 break;
6024 default:
6025 panic("Unexpected SR-IOV mode %d", mode);
6026 }
6027 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
6028
6029
6030 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6031 gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
6032 switch (mode) {
6033 case IXGBE_64_VM:
6034 gpie |= IXGBE_GPIE_VTMODE_64;
6035 break;
6036 case IXGBE_32_VM:
6037 gpie |= IXGBE_GPIE_VTMODE_32;
6038 break;
6039 default:
6040 panic("Unexpected SR-IOV mode %d", mode);
6041 }
6042 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
6043
6044 /* Enable rx/tx for the PF. */
6045 vf_reg = IXGBE_VF_INDEX(adapter->pool);
6046 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
6047 IXGBE_VF_BIT(adapter->pool));
6048 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
6049 IXGBE_VF_BIT(adapter->pool));
6050
6051 /* Allow VM-to-VM communication. */
6052 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
6053
6054 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
6055 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
6056 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
6057
6058 for (i = 0; i < adapter->num_vfs; i++)
6059 ixgbe_init_vf(adapter, &adapter->vfs[i]);
6060 }
6061
6062
6063 /*
6064 ** Check the max frame setting of all active VF's
6065 */
6066 static void
6067 ixgbe_recalculate_max_frame(struct adapter *adapter)
6068 {
6069 struct ixgbe_vf *vf;
6070
6071 IXGBE_CORE_LOCK_ASSERT(adapter);
6072
6073 for (int i = 0; i < adapter->num_vfs; i++) {
6074 vf = &adapter->vfs[i];
6075 if (vf->flags & IXGBE_VF_ACTIVE)
6076 ixgbe_update_max_frame(adapter, vf->max_frame_size);
6077 }
6078 }
6079
6080
/*
** Initialize (or re-initialize) the hardware state for one active VF:
** unmask its mailbox interrupt, restore its default VLAN and MAC
** filter, enable TX/RX, and notify the VF via a control message.
*/
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	/* Unmask this pool's bit in the PF mailbox interrupt mask. */
	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	/* Reinstall the VF's MAC filter if it has a valid address. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}
6113
6114 static int
6115 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
6116 {
6117 struct adapter *adapter;
6118 struct ixgbe_vf *vf;
6119 const void *mac;
6120
6121 adapter = device_get_softc(dev);
6122
6123 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
6124 vfnum, adapter->num_vfs));
6125
6126 IXGBE_CORE_LOCK(adapter);
6127 vf = &adapter->vfs[vfnum];
6128 vf->pool= vfnum;
6129
6130 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
6131 vf->rar_index = vfnum + 1;
6132 vf->default_vlan = 0;
6133 vf->max_frame_size = ETHER_MAX_LEN;
6134 ixgbe_update_max_frame(adapter, vf->max_frame_size);
6135
6136 if (nvlist_exists_binary(config, "mac-addr")) {
6137 mac = nvlist_get_binary(config, "mac-addr", NULL);
6138 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
6139 if (nvlist_get_bool(config, "allow-set-mac"))
6140 vf->flags |= IXGBE_VF_CAP_MAC;
6141 } else
6142 /*
6143 * If the administrator has not specified a MAC address then
6144 * we must allow the VF to choose one.
6145 */
6146 vf->flags |= IXGBE_VF_CAP_MAC;
6147
6148 vf->flags = IXGBE_VF_ACTIVE;
6149
6150 ixgbe_init_vf(adapter, vf);
6151 IXGBE_CORE_UNLOCK(adapter);
6152
6153 return (0);
6154 }
6155 #endif /* PCI_IOV */
6156 static void
6157 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
6158 {
6159 u32 mask;
6160
6161 switch (adapter->hw.mac.type) {
6162 case ixgbe_mac_82598EB:
6163 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
6164 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
6165 break;
6166 case ixgbe_mac_82599EB:
6167 case ixgbe_mac_X540:
6168 case ixgbe_mac_X550:
6169 case ixgbe_mac_X550EM_x:
6170 mask = (queues & 0xFFFFFFFF);
6171 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
6172 mask = (queues >> 32);
6173 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
6174 break;
6175 default:
6176 break;
6177 }
6178 }
6179