/* $NetBSD: ixv.c,v 1.165 2021/08/25 09:06:02 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.165 2021/08/25 09:06:02 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "vlan.h"

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
        "Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t, cfdata_t, void *);
static void     ixv_attach(device_t, device_t, void *);
static int      ixv_detach(device_t, int);
#if 0
static int      ixv_shutdown(device_t);
#endif
static int      ixv_ifflags_cb(struct ethercom *);
static int      ixv_ioctl(struct ifnet *, u_long, void *);
static int      ixv_init(struct ifnet *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_ifstop(struct ifnet *, int);
static void     ixv_stop_locked(void *);
static void     ixv_init_device_features(struct adapter *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static int      ixv_allocate_pci_resources(struct adapter *,
    const struct pci_attach_args *);
static void     ixv_free_deferred_handlers(struct adapter *);
static int      ixv_allocate_msix(struct adapter *,
    const struct pci_attach_args *);
static int      ixv_configure_interrupts(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_handle_timer(struct work *, void *);
static int      ixv_setup_interface(device_t, struct adapter *);
static void     ixv_schedule_admin_tasklet(struct adapter *);
static int      ixv_negotiate_api(struct adapter *);

static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_initialize_rss_mapping(struct adapter *);
static s32      ixv_check_link(struct adapter *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static int      ixv_set_rxfilter(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static int      ixv_sysctl_debug(SYSCTLFN_PROTO);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void     ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

static void     ixv_setup_vlan_tagging(struct adapter *);
static int      ixv_setup_vlan_support(struct adapter *);
static int      ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int      ixv_register_vlan(struct adapter *, u16);
static int      ixv_unregister_vlan(struct adapter *, u16);

static void     ixv_add_device_sysctls(struct adapter *);
static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);
static void     ixv_add_stats_sysctls(struct adapter *);
static void     ixv_clear_evcnt(struct adapter *);

/* Sysctl handlers */
static void     ixv_set_sysctl_value(struct adapter *, const char *,
    const char *, int *, int);
static int      ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int      ixv_sysctl_rx_copy_len(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);

/* Event handlers running on workqueue */
static void     ixv_handle_que(void *);

/* Deferred workqueue handlers */
static void     ixv_handle_admin(struct work *, void *);
static void     ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
        "ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
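/*
 * Note: with the stock IXGBE_LOW_LATENCY value of 128 this works out to
 * 4000000 / 128 = 31250 interrupts/s; elsewhere in the driver family the
 * EITR interval field is computed back from the rate as 4000000 / rate.
 */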

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Which packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE            1
#define IXGBE_CALLOUT_FLAGS     CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS     SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS   WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS  WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS     0
#define IXGBE_SOFTINT_FLAGS     0
#define IXGBE_WORKQUEUE_FLAGS   WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS  0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines if the driver should be loaded on
 *   adapter based on its PCI vendor/device ID.
 *
 *   return 1 on match, 0 otherwise
 ************************************************************************/
static int
ixv_probe(device_t dev, cfdata_t cf, void *aux)
{
#ifdef __HAVE_PCI_MSI_MSIX
        const struct pci_attach_args *pa = aux;

        return (ixv_lookup(pa) != NULL) ? 1 : 0;
#else
        return 0;
#endif
} /* ixv_probe */

static const ixgbe_vendor_info_t *
ixv_lookup(const struct pci_attach_args *pa)
{
        const ixgbe_vendor_info_t *ent;
        pcireg_t subid;

        INIT_DEBUGOUT("ixv_lookup: begin");

        if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
                return NULL;

        subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

        for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
                if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
                    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
                    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        return ent;
                }
        }

        return NULL;
}

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
        struct adapter *adapter;
        struct ixgbe_hw *hw;
        int             error = 0;
        pcireg_t        id, subid;
        const ixgbe_vendor_info_t *ent;
        const struct pci_attach_args *pa = aux;
        const char      *apivstr;
        const char      *str;
        char            wqname[MAXCOMLEN];
        char            buf[256];

        INIT_DEBUGOUT("ixv_attach: begin");

        /*
         * Make sure BUSMASTER is set, on a VM under
         * KVM it may not be and will break things.
         */
        ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_private(dev);
        adapter->hw.back = adapter;
        adapter->dev = dev;
        hw = &adapter->hw;

        adapter->init_locked = ixv_init_locked;
        adapter->stop_locked = ixv_stop_locked;

        adapter->osdep.pc = pa->pa_pc;
        adapter->osdep.tag = pa->pa_tag;
        if (pci_dma64_available(pa))
                adapter->osdep.dmat = pa->pa_dmat64;
        else
                adapter->osdep.dmat = pa->pa_dmat;
        adapter->osdep.attached = false;

        ent = ixv_lookup(pa);

        KASSERT(ent != NULL);

        aprint_normal(": %s, Version - %s\n",
            ixv_strings[ent->index], ixv_driver_version);

        /* Core Lock Init */
        IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

        /* Do base PCI setup - map BAR0 */
        if (ixv_allocate_pci_resources(adapter, pa)) {
                aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
                error = ENXIO;
                goto err_out;
        }

        /* SYSCTL APIs */
        ixv_add_device_sysctls(adapter);

        /* Set up the timer callout and workqueue */
        callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
        snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
        error = workqueue_create(&adapter->timer_wq, wqname,
            ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
            IXGBE_TASKLET_WQ_FLAGS);
        if (error) {
                aprint_error_dev(dev,
                    "could not create timer workqueue (%d)\n", error);
                goto err_out;
        }

        /* Save off the information about this board */
        id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
        subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
        hw->vendor_id = PCI_VENDOR(id);
        hw->device_id = PCI_PRODUCT(id);
        hw->revision_id =
            PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
        hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
        hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

        /* A subset of set_mac_type */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_VF:
                hw->mac.type = ixgbe_mac_82599_vf;
                str = "82599 VF";
                break;
        case IXGBE_DEV_ID_X540_VF:
                hw->mac.type = ixgbe_mac_X540_vf;
                str = "X540 VF";
                break;
        case IXGBE_DEV_ID_X550_VF:
                hw->mac.type = ixgbe_mac_X550_vf;
                str = "X550 VF";
                break;
        case IXGBE_DEV_ID_X550EM_X_VF:
                hw->mac.type = ixgbe_mac_X550EM_x_vf;
                str = "X550EM X VF";
                break;
        case IXGBE_DEV_ID_X550EM_A_VF:
                hw->mac.type = ixgbe_mac_X550EM_a_vf;
                str = "X550EM A VF";
                break;
        default:
                /* Shouldn't get here since probe succeeded */
                aprint_error_dev(dev, "Unknown device ID!\n");
                error = ENXIO;
                goto err_out;
                break;
        }
        aprint_normal_dev(dev, "device %s\n", str);

        ixv_init_device_features(adapter);

        /* Initialize the shared code */
        error = ixgbe_init_ops_vf(hw);
        if (error) {
                aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
                error = EIO;
                goto err_out;
        }

        /* Setup the mailbox */
        ixgbe_init_mbx_params_vf(hw);

        /* Set the right number of segments */
        adapter->num_segs = IXGBE_82599_SCATTER;

        /* Reset mbox api to 1.0 */
        error = hw->mac.ops.reset_hw(hw);
        if (error == IXGBE_ERR_RESET_FAILED)
                aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
        else if (error)
                aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
                    error);
        if (error) {
                error = EIO;
                goto err_out;
        }

        error = hw->mac.ops.init_hw(hw);
        if (error) {
                aprint_error_dev(dev, "...init_hw() failed!\n");
                error = EIO;
                goto err_out;
        }

        /* Negotiate mailbox API version */
        error = ixv_negotiate_api(adapter);
        if (error)
                aprint_normal_dev(dev,
                    "MBX API negotiation failed during attach!\n");
        switch (hw->api_version) {
        case ixgbe_mbox_api_10:
                apivstr = "1.0";
                break;
        case ixgbe_mbox_api_20:
                apivstr = "2.0";
                break;
        case ixgbe_mbox_api_11:
                apivstr = "1.1";
                break;
        case ixgbe_mbox_api_12:
                apivstr = "1.2";
                break;
        case ixgbe_mbox_api_13:
                apivstr = "1.3";
                break;
        default:
                apivstr = "unknown";
                break;
        }
        aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

        /* If no mac address was assigned, make a random one */
        if (!ixv_check_ether_addr(hw->mac.addr)) {
                u8 addr[ETHER_ADDR_LEN];
                uint64_t rndval = cprng_strong64();

                memcpy(addr, &rndval, sizeof(addr));
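                /*
                 * Clear the multicast bit and set the locally-administered
                 * bit so the random address is a valid unicast LAA.
                 */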
                addr[0] &= 0xFE;
                addr[0] |= 0x02;
                bcopy(addr, hw->mac.addr, sizeof(addr));
        }

        /* Register for VLAN events */
        ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);

        /* Sysctls for limiting the amount of work done in the taskqueues */
        ixv_set_sysctl_value(adapter, "rx_processing_limit",
            "max number of rx packets to process",
            &adapter->rx_process_limit, ixv_rx_process_limit);

        ixv_set_sysctl_value(adapter, "tx_processing_limit",
            "max number of tx packets to process",
            &adapter->tx_process_limit, ixv_tx_process_limit);

        /* Do descriptor calc and sanity checks */
        if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
                aprint_error_dev(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixv_txd;

        if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
                aprint_error_dev(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixv_rxd;

        /* Set default high limit of copying mbuf in rxeof */
        adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

        /* Setup MSI-X */
        error = ixv_configure_interrupts(adapter);
        if (error)
                goto err_out;

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
                error = ENOMEM;
                goto err_out;
        }

        /* hw.ix defaults init */
        adapter->enable_aim = ixv_enable_aim;

        adapter->txrx_use_workqueue = ixv_txrx_workqueue;

        error = ixv_allocate_msix(adapter, pa);
        if (error) {
                aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
                goto err_late;
        }

        /* Setup OS specific network interface */
        error = ixv_setup_interface(dev, adapter);
        if (error != 0) {
                aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
                goto err_late;
        }

        /* Do the stats setup */
        ixv_save_stats(adapter);
        ixv_init_stats(adapter);
        ixv_add_stats_sysctls(adapter);

        if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
                ixgbe_netmap_attach(adapter);

        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
        aprint_verbose_dev(dev, "feature cap %s\n", buf);
        snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
        aprint_verbose_dev(dev, "feature ena %s\n", buf);

        INIT_DEBUGOUT("ixv_attach: end");
        adapter->osdep.attached = true;

        return;

err_late:
        ixgbe_free_queues(adapter);
err_out:
        ixv_free_pci_resources(adapter);
        IXGBE_CORE_LOCK_DESTROY(adapter);

        return;
} /* ixv_attach */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
        struct adapter  *adapter = device_private(dev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct tx_ring  *txr = adapter->tx_rings;
        struct rx_ring  *rxr = adapter->rx_rings;
        struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

        INIT_DEBUGOUT("ixv_detach: begin");
        if (adapter->osdep.attached == false)
                return 0;

        /* Stop the interface. Callouts are stopped in it. */
        ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
        /* Make sure VLANs are not using driver */
        if (!VLAN_ATTACHED(&adapter->osdep.ec))
                ; /* nothing to do: no VLANs */
        else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
                vlan_ifdetach(adapter->ifp);
        else {
                aprint_error_dev(dev, "VLANs in use, detach first\n");
                return EBUSY;
        }
#endif

        ether_ifdetach(adapter->ifp);
        callout_halt(&adapter->timer, NULL);
        ixv_free_deferred_handlers(adapter);

        if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
                netmap_detach(adapter->ifp);

        ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
        bus_generic_detach(dev);
#endif
        if_detach(adapter->ifp);
        ifmedia_fini(&adapter->media);
        if_percpuq_destroy(adapter->ipq);

        sysctl_teardown(&adapter->sysctllog);
        evcnt_detach(&adapter->efbig_tx_dma_setup);
        evcnt_detach(&adapter->mbuf_defrag_failed);
        evcnt_detach(&adapter->efbig2_tx_dma_setup);
        evcnt_detach(&adapter->einval_tx_dma_setup);
        evcnt_detach(&adapter->other_tx_dma_setup);
        evcnt_detach(&adapter->eagain_tx_dma_setup);
        evcnt_detach(&adapter->enomem_tx_dma_setup);
        evcnt_detach(&adapter->watchdog_events);
        evcnt_detach(&adapter->tso_err);
        evcnt_detach(&adapter->admin_irqev);
        evcnt_detach(&adapter->link_workev);

        txr = adapter->tx_rings;
        for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
                evcnt_detach(&adapter->queues[i].irqs);
                evcnt_detach(&adapter->queues[i].handleq);
                evcnt_detach(&adapter->queues[i].req);
                evcnt_detach(&txr->no_desc_avail);
                evcnt_detach(&txr->total_packets);
                evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
                evcnt_detach(&txr->pcq_drops);
#endif

                evcnt_detach(&rxr->rx_packets);
                evcnt_detach(&rxr->rx_bytes);
                evcnt_detach(&rxr->rx_copies);
                evcnt_detach(&rxr->no_jmbuf);
                evcnt_detach(&rxr->rx_discarded);
        }
        evcnt_detach(&stats->ipcs);
        evcnt_detach(&stats->l4cs);
        evcnt_detach(&stats->ipcs_bad);
        evcnt_detach(&stats->l4cs_bad);

        /* Packet Reception Stats */
        evcnt_detach(&stats->vfgorc);
        evcnt_detach(&stats->vfgprc);
        evcnt_detach(&stats->vfmprc);

        /* Packet Transmission Stats */
        evcnt_detach(&stats->vfgotc);
        evcnt_detach(&stats->vfgptc);

        /* Mailbox Stats */
        evcnt_detach(&hw->mbx.stats.msgs_tx);
        evcnt_detach(&hw->mbx.stats.msgs_rx);
        evcnt_detach(&hw->mbx.stats.acks);
        evcnt_detach(&hw->mbx.stats.reqs);
        evcnt_detach(&hw->mbx.stats.rsts);

        ixgbe_free_queues(adapter);

        IXGBE_CORE_LOCK_DESTROY(adapter);

        return (0);
} /* ixv_detach */

/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que;
        int             error = 0;
        uint32_t        mask;
        int             i;

        INIT_DEBUGOUT("ixv_init_locked: begin");
        KASSERT(mutex_owned(&adapter->core_mtx));
        hw->adapter_stopped = FALSE;
        hw->mac.ops.stop_adapter(hw);
        callout_stop(&adapter->timer);
        for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
                que->disabled_count = 0;

        adapter->max_frame_size =
            ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        /* reprogram the RAR[0] in case user changed it. */
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

        /* Get the latest mac address, User can use a LAA */
        memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                aprint_error_dev(dev, "Could not setup transmit structures\n");
                ixv_stop_locked(adapter);
                return;
        }

        /* Reset VF and renegotiate mailbox API version */
        hw->mac.ops.reset_hw(hw);
        hw->mac.ops.start_hw(hw);
        error = ixv_negotiate_api(adapter);
        if (error)
                device_printf(dev,
                    "Mailbox API negotiation failed in init_locked!\n");

        ixv_initialize_transmit_units(adapter);

        /* Setup Multicast table */
        ixv_set_rxfilter(adapter);

        /* Use fixed buffer size, even for jumbo frames */
        adapter->rx_mbuf_sz = MCLBYTES;

        /* Prepare receive descriptors and buffers */
        error = ixgbe_setup_receive_structures(adapter);
        if (error) {
                device_printf(dev,
                    "Could not setup receive structures (err = %d)\n", error);
                ixv_stop_locked(adapter);
                return;
        }

        /* Configure RX settings */
        ixv_initialize_receive_units(adapter);

        /* Initialize variable holding task enqueue requests interrupts */
        adapter->task_requests = 0;

        /* Set up VLAN offload and filter */
        ixv_setup_vlan_support(adapter);

        /* Set up MSI-X routing */
        ixv_configure_ivars(adapter);

        /* Set up auto-mask */
        mask = (1 << adapter->vector);
        for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
                mask |= (1 << que->msix);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

        /* Set moderation on the Link interrupt */
        ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

        /* Stats init */
        ixv_init_stats(adapter);

        /* Config/Enable Link */
        hw->mac.get_link_status = TRUE;
        hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
            FALSE);

        /* Start watchdog */
        callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
        atomic_store_relaxed(&adapter->timer_pending, 0);

        /* OK to schedule workqueues. */
        adapter->schedule_wqs_ok = true;

        /* And now turn on interrupts */
        ixv_enable_intr(adapter);

        /* Update saved flags. See ixgbe_ifflags_cb() */
        adapter->if_flags = ifp->if_flags;
        adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

        /* Now inform the stack we're ready */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        return;
} /* ixv_init_locked */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que = &adapter->queues[vector];
        u32             queue = 1UL << vector;
        u32             mask;

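        /*
         * disabled_count is a nesting counter: the queue interrupt stays
         * masked until every ixv_disable_queue() call has been balanced
         * by an ixv_enable_queue() call.
         */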
        mutex_enter(&que->dc_mtx);
        if (que->disabled_count > 0 && --que->disabled_count > 0)
                goto out;

        mask = (IXGBE_EIMS_RTX_QUEUE & queue);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
        mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que = &adapter->queues[vector];
        u32             queue = 1UL << vector;
        u32             mask;

        mutex_enter(&que->dc_mtx);
        if (que->disabled_count++ > 0)
                goto out;

        mask = (IXGBE_EIMS_RTX_QUEUE & queue);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
        mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */

#if 0
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
        u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
        struct ix_queue *que = arg;
        struct adapter  *adapter = que->adapter;
        struct tx_ring  *txr = que->txr;
        struct rx_ring  *rxr = que->rxr;
        bool            more;
        u32             newitr = 0;

        ixv_disable_queue(adapter, que->msix);
        ++que->irqs.ev_count;

#ifdef __NetBSD__
        /* Don't run ixgbe_rxeof in interrupt context */
        more = true;
#else
        more = ixgbe_rxeof(que);
#endif

        IXGBE_TX_LOCK(txr);
        ixgbe_txeof(txr);
        IXGBE_TX_UNLOCK(txr);

        /* Do AIM now? */

        if (adapter->enable_aim == false)
                goto no_calc;
        /*
         * Do Adaptive Interrupt Moderation:
         *   - Write out last calculated setting
         *   - Calculate based on average size over
         *     the last interval.
         */
        if (que->eitr_setting)
                ixv_eitr_write(adapter, que->msix, que->eitr_setting);

        que->eitr_setting = 0;

        /* Idle, do nothing */
        if ((txr->bytes == 0) && (rxr->bytes == 0))
                goto no_calc;

        if ((txr->bytes) && (txr->packets))
                newitr = txr->bytes/txr->packets;
        if ((rxr->bytes) && (rxr->packets))
                newitr = uimax(newitr, (rxr->bytes / rxr->packets));
        newitr += 24; /* account for hardware frame, crc */

        /* set an upper boundary */
        newitr = uimin(newitr, 3000);

        /* Be nice to the mid range */
        if ((newitr > 300) && (newitr < 1200))
                newitr = (newitr / 3);
        else
                newitr = (newitr / 2);
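        /*
         * Example: 1500-byte average frames give newitr = 1524 after the
         * +24 adjustment, which falls outside the 300..1200 mid range and
         * is therefore halved to 762 before being programmed into EITR.
         */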

        /*
         * When RSC is used, ITR interval must be larger than RSC_DELAY.
         * Currently, we use 2us for RSC_DELAY. The minimum value is always
         * greater than 2us on 100M (and 10M?(not documented)), but it's not
         * on 1G and higher.
         */
        if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
            && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
                if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
                        newitr = IXGBE_MIN_RSC_EITR_10G1G;
        }

        /* save for next interrupt */
        que->eitr_setting = newitr;

        /* Reset state */
        txr->bytes = 0;
        txr->packets = 0;
        rxr->bytes = 0;
        rxr->packets = 0;

no_calc:
        if (more)
                softint_schedule(que->que_si);
        else /* Re-enable this interrupt */
                ixv_enable_queue(adapter, que->msix);

        return 1;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
        struct adapter  *adapter = arg;
        struct ixgbe_hw *hw = &adapter->hw;

        ++adapter->admin_irqev.ev_count;
        /* NetBSD: We use auto-clear, so it's not required to write VTEICR */

        /* Link status change */
        hw->mac.get_link_status = TRUE;
        atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX);
        ixv_schedule_admin_tasklet(adapter);

        return 1;
} /* ixv_msix_mbx */

static void
ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
{

        /*
         * Newer devices than 82598 have VF function, so this function is
         * simple.
         */
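        /*
         * CNT_WDIS makes this write update only the interval; it keeps
         * the write from also disturbing the hardware's running ITR
         * counter (see the EITR description in the 82599/X5xx datasheets).
         */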
        itr |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
}


/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct adapter *adapter = ifp->if_softc;

        INIT_DEBUGOUT("ixv_media_status: begin");
        ixv_update_link_status(adapter);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        if (adapter->link_active != LINK_STATE_UP) {
                ifmr->ifm_active |= IFM_NONE;
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;

        switch (adapter->link_speed) {
        case IXGBE_LINK_SPEED_10GB_FULL:
                ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
                break;
        case IXGBE_LINK_SPEED_5GB_FULL:
                ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
                break;
        case IXGBE_LINK_SPEED_2_5GB_FULL:
                ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
                break;
        case IXGBE_LINK_SPEED_1GB_FULL:
                ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
                break;
        case IXGBE_LINK_SPEED_100_FULL:
                ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
                break;
        case IXGBE_LINK_SPEED_10_FULL:
                ifmr->ifm_active |= IFM_10_T | IFM_FDX;
                break;
        }

        ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct ifmedia *ifm = &adapter->media;

        INIT_DEBUGOUT("ixv_media_change: begin");

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                return (EINVAL);

        switch (IFM_SUBTYPE(ifm->ifm_media)) {
        case IFM_AUTO:
                break;
        default:
                device_printf(adapter->dev, "Only auto media type\n");
                return (EINVAL);
        }

        return (0);
} /* ixv_media_change */

static void
ixv_schedule_admin_tasklet(struct adapter *adapter)
{
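        /*
         * admin_pending is a 0/1 latch: the compare-and-swap succeeds for
         * only one caller at a time, so the admin work is enqueued at
         * most once until the handler clears the flag again.
         */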
        if (adapter->schedule_wqs_ok) {
                if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
                        workqueue_enqueue(adapter->admin_wq,
                            &adapter->admin_wc, NULL);
        }
}

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int             mbx_api[] = { ixgbe_mbox_api_13,
                                      ixgbe_mbox_api_12,
                                      ixgbe_mbox_api_11,
                                      ixgbe_mbox_api_10,
                                      ixgbe_mbox_api_unknown };
        int             i = 0;

        while (mbx_api[i] != ixgbe_mbox_api_unknown) {
                if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
                        return (0);
                i++;
        }

        return (EINVAL);
} /* ixv_negotiate_api */


/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
        u8      mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
        struct ifnet            *ifp = adapter->ifp;
        struct ixgbe_hw         *hw = &adapter->hw;
        u8                      *update_ptr;
        int                     mcnt = 0;
        struct ethercom         *ec = &adapter->osdep.ec;
        struct ether_multi      *enm;
        struct ether_multistep  step;
        bool                    overflow = false;
        int                     error, rc = 0;

        KASSERT(mutex_owned(&adapter->core_mtx));
        IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

        /* 1: For PROMISC */
        if (ifp->if_flags & IFF_PROMISC) {
                error = hw->mac.ops.update_xcast_mode(hw,
                    IXGBEVF_XCAST_MODE_PROMISC);
                if (error == IXGBE_ERR_NOT_TRUSTED) {
                        device_printf(adapter->dev,
                            "this interface is not trusted\n");
                        error = EPERM;
                } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
                        device_printf(adapter->dev,
                            "the PF doesn't support promisc mode\n");
                        error = EOPNOTSUPP;
                } else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
                        device_printf(adapter->dev,
1139 "the PF may not in promisc mode\n");
                        error = EINVAL;
                } else if (error) {
                        device_printf(adapter->dev,
                            "failed to set promisc mode. error = %d\n",
                            error);
                        error = EIO;
                } else
                        return 0;
                rc = error;
        }

        /* 2: For ALLMULTI or normal */
        ETHER_LOCK(ec);
        ETHER_FIRST_MULTI(step, ec, enm);
        while (enm != NULL) {
                if ((mcnt >= IXGBE_MAX_VF_MC) ||
                    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                        ETHER_ADDR_LEN) != 0)) {
                        overflow = true;
                        break;
                }
                bcopy(enm->enm_addrlo,
                    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
                    IXGBE_ETH_LENGTH_OF_ADDRESS);
                mcnt++;
                ETHER_NEXT_MULTI(step, enm);
        }
        ETHER_UNLOCK(ec);

        /* 3: For ALLMULTI */
        if (overflow) {
                error = hw->mac.ops.update_xcast_mode(hw,
                    IXGBEVF_XCAST_MODE_ALLMULTI);
                if (error == IXGBE_ERR_NOT_TRUSTED) {
                        device_printf(adapter->dev,
                            "this interface is not trusted\n");
                        error = EPERM;
                } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
                        device_printf(adapter->dev,
                            "the PF doesn't support allmulti mode\n");
                        error = EOPNOTSUPP;
                } else if (error) {
                        device_printf(adapter->dev,
                            "number of Ethernet multicast addresses "
                            "exceeds the limit (%d). error = %d\n",
                            IXGBE_MAX_VF_MC, error);
                        error = ENOSPC;
                } else {
                        ETHER_LOCK(ec);
                        ec->ec_flags |= ETHER_F_ALLMULTI;
                        ETHER_UNLOCK(ec);
                        return rc; /* Promisc might have failed */
                }

                if (rc == 0)
                        rc = error;

                /* Continue to update the multicast table as many as we can */
        }

        /* 4: For normal operation */
        error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
        if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
                /* Normal operation */
                ETHER_LOCK(ec);
                ec->ec_flags &= ~ETHER_F_ALLMULTI;
                ETHER_UNLOCK(ec);
                error = 0;
        } else if (error) {
                device_printf(adapter->dev,
                    "failed to set Ethernet multicast address "
                    "operation to normal. error = %d\n", error);
        }

        update_ptr = mta;

        error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
            update_ptr, mcnt, ixv_mc_array_itr, TRUE);
        if (rc == 0)
                rc = error;

        return rc;
} /* ixv_set_rxfilter */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
        u8 *addr = *update_ptr;
        u8 *newptr;

        *vmdq = 0;

        newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
        *update_ptr = newptr;

        return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
        struct adapter *adapter = arg;

        if (adapter->schedule_wqs_ok) {
                if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
                        workqueue_enqueue(adapter->timer_wq,
                            &adapter->timer_wc, NULL);
        }
}

static void
ixv_handle_timer(struct work *wk, void *context)
{
        struct adapter  *adapter = context;
        device_t        dev = adapter->dev;
        struct ix_queue *que = adapter->queues;
        u64             queues = 0;
        u64             v0, v1, v2, v3, v4, v5, v6, v7;
        int             hung = 0;
        int             i;

        IXGBE_CORE_LOCK(adapter);

        if (ixv_check_link(adapter)) {
                ixv_init_locked(adapter);
                IXGBE_CORE_UNLOCK(adapter);
                return;
        }

        /* Stats Update */
        ixv_update_stats(adapter);

        /* Update some event counters */
        v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
        que = adapter->queues;
        for (i = 0; i < adapter->num_queues; i++, que++) {
                struct tx_ring *txr = que->txr;

                v0 += txr->q_efbig_tx_dma_setup;
                v1 += txr->q_mbuf_defrag_failed;
                v2 += txr->q_efbig2_tx_dma_setup;
                v3 += txr->q_einval_tx_dma_setup;
                v4 += txr->q_other_tx_dma_setup;
                v5 += txr->q_eagain_tx_dma_setup;
                v6 += txr->q_enomem_tx_dma_setup;
                v7 += txr->q_tso_err;
        }
        adapter->efbig_tx_dma_setup.ev_count = v0;
        adapter->mbuf_defrag_failed.ev_count = v1;
        adapter->efbig2_tx_dma_setup.ev_count = v2;
        adapter->einval_tx_dma_setup.ev_count = v3;
        adapter->other_tx_dma_setup.ev_count = v4;
        adapter->eagain_tx_dma_setup.ev_count = v5;
        adapter->enomem_tx_dma_setup.ev_count = v6;
        adapter->tso_err.ev_count = v7;

        /*
         * Check the TX queues status
         *   - mark hung queues so we don't schedule on them
         *   - watchdog only if all queues show hung
         */
        que = adapter->queues;
        for (i = 0; i < adapter->num_queues; i++, que++) {
                /* Keep track of queues with work for soft irq */
                if (que->txr->busy)
                        queues |= ((u64)1 << que->me);
                /*
                 * Each time txeof runs without cleaning while there
                 * are uncleaned descriptors, it increments 'busy'. If
                 * that reaches the MAX we declare the queue hung.
                 */
                if (que->busy == IXGBE_QUEUE_HUNG) {
                        ++hung;
                        /* Mark the queue as inactive */
                        adapter->active_queues &= ~((u64)1 << que->me);
                        continue;
                } else {
                        /* Check if we've come back from hung */
                        if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
                }
                if (que->busy >= IXGBE_MAX_TX_BUSY) {
                        device_printf(dev,
                            "Warning queue %d appears to be hung!\n", i);
                        que->txr->busy = IXGBE_QUEUE_HUNG;
                        ++hung;
                }
        }

        /* Only truly watchdog if all queues show hung */
        if (hung == adapter->num_queues)
                goto watchdog;
#if 0
        else if (queues != 0) { /* Force an IRQ on queues with work */
                ixv_rearm_queues(adapter, queues);
        }
#endif

        atomic_store_relaxed(&adapter->timer_pending, 0);
        IXGBE_CORE_UNLOCK(adapter);
        callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

        return;

watchdog:
        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        adapter->ifp->if_flags &= ~IFF_RUNNING;
        adapter->watchdog_events.ev_count++;
        ixv_init_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_timer */

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
        struct ifnet *ifp = adapter->ifp;
        device_t     dev = adapter->dev;

        KASSERT(mutex_owned(&adapter->core_mtx));

        if (adapter->link_up) {
                if (adapter->link_active != LINK_STATE_UP) {
                        if (bootverbose) {
                                const char *bpsmsg;

                                switch (adapter->link_speed) {
                                case IXGBE_LINK_SPEED_10GB_FULL:
                                        bpsmsg = "10 Gbps";
                                        break;
                                case IXGBE_LINK_SPEED_5GB_FULL:
                                        bpsmsg = "5 Gbps";
                                        break;
                                case IXGBE_LINK_SPEED_2_5GB_FULL:
                                        bpsmsg = "2.5 Gbps";
                                        break;
                                case IXGBE_LINK_SPEED_1GB_FULL:
                                        bpsmsg = "1 Gbps";
                                        break;
                                case IXGBE_LINK_SPEED_100_FULL:
                                        bpsmsg = "100 Mbps";
                                        break;
                                case IXGBE_LINK_SPEED_10_FULL:
                                        bpsmsg = "10 Mbps";
                                        break;
                                default:
                                        bpsmsg = "unknown speed";
                                        break;
                                }
                                device_printf(dev, "Link is up %s %s \n",
                                    bpsmsg, "Full Duplex");
                        }
                        adapter->link_active = LINK_STATE_UP;
                        if_link_state_change(ifp, LINK_STATE_UP);
                }
        } else {
                /*
                 * Do it when link active changes to DOWN. i.e.
                 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
                 * b) LINK_STATE_UP -> LINK_STATE_DOWN
                 */
                if (adapter->link_active != LINK_STATE_DOWN) {
                        if (bootverbose)
                                device_printf(dev, "Link is Down\n");
                        if_link_state_change(ifp, LINK_STATE_DOWN);
                        adapter->link_active = LINK_STATE_DOWN;
                }
        }
} /* ixv_update_link_status */


/************************************************************************
 * ixv_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
        struct adapter *adapter = ifp->if_softc;

        IXGBE_CORE_LOCK(adapter);
        ixv_stop_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);

        workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
        atomic_store_relaxed(&adapter->admin_pending, 0);
        workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
        atomic_store_relaxed(&adapter->timer_pending, 0);
}

static void
ixv_stop_locked(void *arg)
{
        struct ifnet    *ifp;
        struct adapter  *adapter = arg;
        struct ixgbe_hw *hw = &adapter->hw;

        ifp = adapter->ifp;

        KASSERT(mutex_owned(&adapter->core_mtx));

        INIT_DEBUGOUT("ixv_stop_locked: begin\n");
        ixv_disable_intr(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

        hw->mac.ops.reset_hw(hw);
        adapter->hw.adapter_stopped = FALSE;
        hw->mac.ops.stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* Don't schedule workqueues. */
        adapter->schedule_wqs_ok = false;

        /* reprogram the RAR[0] in case user changed it. */
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

        return;
} /* ixv_stop_locked */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
        pcireg_t memtype, csr;
        device_t dev = adapter->dev;
        bus_addr_t addr;
        int flags;

        memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
        switch (memtype) {
        case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
        case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
                adapter->osdep.mem_bus_space_tag = pa->pa_memt;
                if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
                    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
                        goto map_err;
                if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
                        aprint_normal_dev(dev, "clearing prefetchable bit\n");
                        flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
                }
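                /*
                 * (The BAR holds device registers with read side effects,
                 * hence the refusal to map it prefetchable above.)
                 */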
                if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
                    adapter->osdep.mem_size, flags,
                    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
                        adapter->osdep.mem_size = 0;
                        aprint_error_dev(dev, "unable to map BAR0\n");
                        return ENXIO;
                }
                /*
                 * Enable address decoding for memory range in case it's not
                 * set.
                 */
                csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
                    PCI_COMMAND_STATUS_REG);
                csr |= PCI_COMMAND_MEM_ENABLE;
                pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
                    csr);
                break;
        default:
                aprint_error_dev(dev, "unexpected type on BAR0\n");
                return ENXIO;
        }

        /* Pick up the tuneable queues */
        adapter->num_queues = ixv_num_queues;

        return (0);
} /* ixv_allocate_pci_resources */

static void
ixv_free_deferred_handlers(struct adapter *adapter)
{
        struct ix_queue *que = adapter->queues;
        struct tx_ring *txr = adapter->tx_rings;
        int i;

        for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
                if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
                        if (txr->txr_si != NULL)
                                softint_disestablish(txr->txr_si);
                }
                if (que->que_si != NULL)
                        softint_disestablish(que->que_si);
        }
        if (adapter->txr_wq != NULL)
                workqueue_destroy(adapter->txr_wq);
        if (adapter->txr_wq_enqueued != NULL)
                percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
        if (adapter->que_wq != NULL)
                workqueue_destroy(adapter->que_wq);

        /* Drain the Mailbox(link) queue */
        if (adapter->admin_wq != NULL) {
                workqueue_destroy(adapter->admin_wq);
                adapter->admin_wq = NULL;
        }
        if (adapter->timer_wq != NULL) {
                workqueue_destroy(adapter->timer_wq);
                adapter->timer_wq = NULL;
        }
} /* ixv_free_deferred_handlers */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
        struct ix_queue *que = adapter->queues;
        int             rid;

        /*
         * Release all msix queue resources:
         */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                if (que->res != NULL)
                        pci_intr_disestablish(adapter->osdep.pc,
                            adapter->osdep.ihs[i]);
        }


        /* Clean the Mailbox interrupt last */
        rid = adapter->vector;

        if (adapter->osdep.ihs[rid] != NULL) {
                pci_intr_disestablish(adapter->osdep.pc,
                    adapter->osdep.ihs[rid]);
                adapter->osdep.ihs[rid] = NULL;
        }

        pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
            adapter->osdep.nintrs);

        if (adapter->osdep.mem_size != 0) {
                bus_space_unmap(adapter->osdep.mem_bus_space_tag,
                    adapter->osdep.mem_bus_space_handle,
                    adapter->osdep.mem_size);
        }

        return;
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ethercom *ec = &adapter->osdep.ec;
        struct ifnet   *ifp;

        INIT_DEBUGOUT("ixv_setup_interface: begin");

        ifp = adapter->ifp = &ec->ec_if;
        strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_init = ixv_init;
        ifp->if_stop = ixv_ifstop;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
        ifp->if_extflags = IFEF_MPSAFE;
#endif
        ifp->if_ioctl = ixv_ioctl;
        if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
                ixv_start_locked = ixgbe_legacy_start_locked;
#endif
        } else {
                ifp->if_transmit = ixgbe_mq_start;
#if 0
                ixv_start_locked = ixgbe_mq_start_locked;
#endif
        }
        ifp->if_start = ixgbe_legacy_start;
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
        IFQ_SET_READY(&ifp->if_snd);

        if_initialize(ifp);
        adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
        ether_ifattach(ifp, adapter->hw.mac.addr);
        aprint_normal_dev(dev, "Ethernet address %s\n",
            ether_sprintf(adapter->hw.mac.addr));
        /*
         * We use per TX queue softint, so if_deferred_start_init() isn't
         * used.
         */
        ether_set_ifflags_cb(ec, ixv_ifflags_cb);

        adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Set capability flags */
        ifp->if_capabilities |= IFCAP_HWCSUM
                             |  IFCAP_TSOv4
                             |  IFCAP_TSOv6;
        ifp->if_capenable = 0;

        ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
                            |  ETHERCAP_VLAN_HWTAGGING
                            |  ETHERCAP_VLAN_HWCSUM
                            |  ETHERCAP_JUMBO_MTU
                            |  ETHERCAP_VLAN_MTU;

        /* Enable the above capabilities by default */
        ec->ec_capenable = ec->ec_capabilities;

        /* Don't enable LRO by default */
#if 0
        /* NetBSD doesn't support LRO yet */
        ifp->if_capabilities |= IFCAP_LRO;
#endif

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ec->ec_ifmedia = &adapter->media;
        ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixv_media_change,
            ixv_media_status, &adapter->core_mtx);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

        if_register(ifp);

        return 0;
} /* ixv_setup_interface */


/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring  *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_queues; i++, txr++) {
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl, txdctl;
                int j = txr->me;

                /* Set WTHRESH to 8, burst writeback */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

                /* Set the HW Tx Head and Tail indices */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

                /* Set Tx Tail register */
                txr->tail = IXGBE_VFTDT(j);

                txr->txr_no_space = false;

                /* Set Ring parameters */
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
                    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

                /* Now enable */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
        }

        return;
} /* ixv_initialize_transmit_units */


/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             reta = 0, mrqc, rss_key[10];
        int             queue_id;
        int             i, j;
        u32             rss_hash_config;

        /* force use default RSS key. */
#ifdef __NetBSD__
        rss_getkey((uint8_t *) &rss_key);
#else
        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                cprng_fast(&rss_key, sizeof(rss_key));
        }
#endif

        /* Now fill out hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

        /* Set up the redirection table */
        for (i = 0, j = 0; i < 64; i++, j++) {
                if (j == adapter->num_queues)
                        j = 0;

                if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % adapter->num_queues;
                } else
                        queue_id = j;

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
                reta >>= 8;
                reta |= ((uint32_t)queue_id) << 24;
                if ((i & 3) == 3) {
                        IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
                        reta = 0;
                }
        }
1807
1808 /* Perform hash on these packet types */
1809 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1810 rss_hash_config = rss_gethashconfig();
1811 else {
1812 /*
1813 * Disable UDP - IP fragments aren't currently being handled
1814 * and so we end up with a mix of 2-tuple and 4-tuple
1815 * traffic.
1816 */
1817 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1818 | RSS_HASHTYPE_RSS_TCP_IPV4
1819 | RSS_HASHTYPE_RSS_IPV6
1820 | RSS_HASHTYPE_RSS_TCP_IPV6;
1821 }
1822
1823 mrqc = IXGBE_MRQC_RSSEN;
1824 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1825 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1826 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1827 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1828 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1829 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1830 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1831 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1832 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1833 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1834 __func__);
1835 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1836 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1837 __func__);
1838 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1839 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1840 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1841 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1842 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1843 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1844 __func__);
1845 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1846 } /* ixv_initialize_rss_mapping */
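
/*
 * A minimal standalone sketch (compiled out) of the VFRETA packing loop
 * above: 64 one-byte queue indices become sixteen 32-bit words, with
 * entry (4n + k) landing in byte k of word n.  Names are illustrative.
 */
#if 0
#include <stdint.h>

static void
pack_reta(const uint8_t qid[64], uint32_t vfreta[16])
{
	uint32_t reta = 0;
	int i;

	for (i = 0; i < 64; i++) {
		/* Shift earlier entries down; the newest lands in byte 3. */
		reta >>= 8;
		reta |= (uint32_t)qid[i] << 24;
		if ((i & 3) == 3) {
			vfreta[i >> 2] = reta;
			reta = 0;
		}
	}
}
#endif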
1847
1848
1849 /************************************************************************
1850 * ixv_initialize_receive_units - Setup receive registers and features.
1851 ************************************************************************/
1852 static void
1853 ixv_initialize_receive_units(struct adapter *adapter)
1854 {
1855 struct rx_ring *rxr = adapter->rx_rings;
1856 struct ixgbe_hw *hw = &adapter->hw;
1857 struct ifnet *ifp = adapter->ifp;
1858 u32 bufsz, psrtype;
1859
1860 if (ifp->if_mtu > ETHERMTU)
1861 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1862 else
1863 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1864
1865 psrtype = IXGBE_PSRTYPE_TCPHDR
1866 | IXGBE_PSRTYPE_UDPHDR
1867 | IXGBE_PSRTYPE_IPV4HDR
1868 | IXGBE_PSRTYPE_IPV6HDR
1869 | IXGBE_PSRTYPE_L2HDR;
1870
1871 if (adapter->num_queues > 1)
1872 		psrtype |= 1 << 29;	/* RQPL: 2 RSS queues per pool */
1873
1874 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1875
1876 /* Tell PF our max_frame size */
1877 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1878 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1879 }
1880
1881 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1882 u64 rdba = rxr->rxdma.dma_paddr;
1883 u32 reg, rxdctl;
1884 int j = rxr->me;
1885
1886 /* Disable the queue */
1887 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1888 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1889 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1890 for (int k = 0; k < 10; k++) {
1891 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1892 IXGBE_RXDCTL_ENABLE)
1893 msec_delay(1);
1894 else
1895 break;
1896 }
1897 IXGBE_WRITE_BARRIER(hw);
1898 /* Setup the Base and Length of the Rx Descriptor Ring */
1899 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1900 (rdba & 0x00000000ffffffffULL));
1901 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1902 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1903 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1904
1905 /* Reset the ring indices */
1906 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1907 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1908
1909 /* Set up the SRRCTL register */
1910 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1911 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1912 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1913 reg |= bufsz;
1914 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1915 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1916
1917 /* Capture Rx Tail index */
1918 rxr->tail = IXGBE_VFRDT(rxr->me);
1919
1920 /* Do the queue enabling last */
1921 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1922 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1923 for (int k = 0; k < 10; k++) {
1924 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1925 IXGBE_RXDCTL_ENABLE)
1926 break;
1927 msec_delay(1);
1928 }
1929 IXGBE_WRITE_BARRIER(hw);
1930
1931 /* Set the Tail Pointer */
1932 #ifdef DEV_NETMAP
1933 /*
1934 * In netmap mode, we must preserve the buffers made
1935 * available to userspace before the if_init()
1936 * (this is true by default on the TX side, because
1937 * init makes all buffers available to userspace).
1938 *
1939 * netmap_reset() and the device specific routines
1940 * (e.g. ixgbe_setup_receive_rings()) map these
1941 * buffers at the end of the NIC ring, so here we
1942 * must set the RDT (tail) register to make sure
1943 * they are not overwritten.
1944 *
1945 * In this driver the NIC ring starts at RDH = 0,
1946 * RDT points to the last slot available for reception (?),
1947 * so RDT = num_rx_desc - 1 means the whole ring is available.
1948 */
1949 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1950 (ifp->if_capenable & IFCAP_NETMAP)) {
1951 struct netmap_adapter *na = NA(adapter->ifp);
1952 struct netmap_kring *kring = na->rx_rings[i];
1953 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1954
1955 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1956 } else
1957 #endif /* DEV_NETMAP */
1958 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1959 adapter->num_rx_desc - 1);
1960 }
1961
1962 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1963 ixv_initialize_rss_mapping(adapter);
1964 } /* ixv_initialize_receive_units */
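
/*
 * A minimal standalone sketch (compiled out) of the SRRCTL buffer size
 * encoding used above: the BSIZEPKT field takes the Rx buffer size in
 * 1 KB units, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 as in the
 * shared ixgbe headers.  Names are illustrative.
 */
#if 0
#include <stdint.h>

#define BSIZEPKT_SHIFT	10	/* assumed: 1 KB granularity */

static uint32_t
srrctl_bsizepkt(unsigned mtu)
{
	uint32_t bufbytes = (mtu > 1500 /* ETHERMTU */) ? 4096 : 2048;

	return bufbytes >> BSIZEPKT_SHIFT;	/* 4 or 2 */
}
#endif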
1965
1966 /************************************************************************
1967 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1968 *
1969 * Retrieves the TDH value from the hardware
1970 ************************************************************************/
1971 static int
1972 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1973 {
1974 struct sysctlnode node = *rnode;
1975 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1976 uint32_t val;
1977
1978 if (!txr)
1979 return (0);
1980
1981 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1982 node.sysctl_data = &val;
1983 return sysctl_lookup(SYSCTLFN_CALL(&node));
1984 } /* ixv_sysctl_tdh_handler */
1985
1986 /************************************************************************
1987  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1988 *
1989 * Retrieves the TDT value from the hardware
1990 ************************************************************************/
1991 static int
1992 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1993 {
1994 struct sysctlnode node = *rnode;
1995 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1996 uint32_t val;
1997
1998 if (!txr)
1999 return (0);
2000
2001 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
2002 node.sysctl_data = &val;
2003 return sysctl_lookup(SYSCTLFN_CALL(&node));
2004 } /* ixv_sysctl_tdt_handler */
2005
2006 /************************************************************************
2007 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
2008 * handler function
2009 *
2010 * Retrieves the next_to_check value
2011 ************************************************************************/
2012 static int
2013 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2014 {
2015 struct sysctlnode node = *rnode;
2016 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2017 uint32_t val;
2018
2019 if (!rxr)
2020 return (0);
2021
2022 val = rxr->next_to_check;
2023 node.sysctl_data = &val;
2024 return sysctl_lookup(SYSCTLFN_CALL(&node));
2025 } /* ixv_sysctl_next_to_check_handler */
2026
2027 /************************************************************************
2028 * ixv_sysctl_next_to_refresh_handler - Receive Descriptor next to refresh
2029 * handler function
2030 *
2031 * Retrieves the next_to_refresh value
2032 ************************************************************************/
2033 static int
2034 ixv_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS)
2035 {
2036 struct sysctlnode node = *rnode;
2037 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2038 struct adapter *adapter;
2039 uint32_t val;
2040
2041 if (!rxr)
2042 return (0);
2043
2044 adapter = rxr->adapter;
2045 if (ixgbe_fw_recovery_mode_swflag(adapter))
2046 return (EPERM);
2047
2048 val = rxr->next_to_refresh;
2049 node.sysctl_data = &val;
2050 return sysctl_lookup(SYSCTLFN_CALL(&node));
2051 } /* ixv_sysctl_next_to_refresh_handler */
2052
2053 /************************************************************************
2054 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
2055 *
2056 * Retrieves the RDH value from the hardware
2057 ************************************************************************/
2058 static int
2059 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
2060 {
2061 struct sysctlnode node = *rnode;
2062 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2063 uint32_t val;
2064
2065 if (!rxr)
2066 return (0);
2067
2068 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
2069 node.sysctl_data = &val;
2070 return sysctl_lookup(SYSCTLFN_CALL(&node));
2071 } /* ixv_sysctl_rdh_handler */
2072
2073 /************************************************************************
2074 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
2075 *
2076 * Retrieves the RDT value from the hardware
2077 ************************************************************************/
2078 static int
2079 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
2080 {
2081 struct sysctlnode node = *rnode;
2082 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2083 uint32_t val;
2084
2085 if (!rxr)
2086 return (0);
2087
2088 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2089 node.sysctl_data = &val;
2090 return sysctl_lookup(SYSCTLFN_CALL(&node));
2091 } /* ixv_sysctl_rdt_handler */
2092
2093 static void
2094 ixv_setup_vlan_tagging(struct adapter *adapter)
2095 {
2096 struct ethercom *ec = &adapter->osdep.ec;
2097 struct ixgbe_hw *hw = &adapter->hw;
2098 struct rx_ring *rxr;
2099 u32 ctrl;
2100 int i;
2101 bool hwtagging;
2102
2103 /* Enable HW tagging only if any vlan is attached */
2104 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2105 && VLAN_ATTACHED(ec);
2106
2107 /* Enable the queues */
2108 for (i = 0; i < adapter->num_queues; i++) {
2109 rxr = &adapter->rx_rings[i];
2110 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2111 if (hwtagging)
2112 ctrl |= IXGBE_RXDCTL_VME;
2113 else
2114 ctrl &= ~IXGBE_RXDCTL_VME;
2115 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2116 /*
2117 * Let Rx path know that it needs to store VLAN tag
2118 * as part of extra mbuf info.
2119 */
2120 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2121 }
2122 } /* ixv_setup_vlan_tagging */
2123
2124 /************************************************************************
2125 * ixv_setup_vlan_support
2126 ************************************************************************/
2127 static int
2128 ixv_setup_vlan_support(struct adapter *adapter)
2129 {
2130 struct ethercom *ec = &adapter->osdep.ec;
2131 struct ixgbe_hw *hw = &adapter->hw;
2132 u32 vid, vfta, retry;
2133 struct vlanid_list *vlanidp;
2134 int rv, error = 0;
2135
2136 /*
2137 	 * This function is called from both if_init() and ifflags_cb()
2138 * on NetBSD.
2139 */
2140
2141 /*
2142 * Part 1:
2143 * Setup VLAN HW tagging
2144 */
2145 ixv_setup_vlan_tagging(adapter);
2146
2147 if (!VLAN_ATTACHED(ec))
2148 return 0;
2149
2150 /*
2151 * Part 2:
2152 * Setup VLAN HW filter
2153 */
2154 /* Cleanup shadow_vfta */
2155 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2156 adapter->shadow_vfta[i] = 0;
2157 /* Generate shadow_vfta from ec_vids */
2158 ETHER_LOCK(ec);
2159 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2160 uint32_t idx;
2161
2162 idx = vlanidp->vid / 32;
2163 KASSERT(idx < IXGBE_VFTA_SIZE);
2164 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2165 }
2166 ETHER_UNLOCK(ec);
2167
2168 /*
2169 	 * A soft reset zeroes out the VFTA, so
2170 * we need to repopulate it now.
2171 */
2172 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2173 if (adapter->shadow_vfta[i] == 0)
2174 continue;
2175 vfta = adapter->shadow_vfta[i];
2176 /*
2177 			 * Reconstruct the VLAN IDs from
2178 			 * the bits set in each 32-bit
2179 			 * word of the array.
2180 */
2181 for (int j = 0; j < 32; j++) {
2182 retry = 0;
2183 if ((vfta & ((u32)1 << j)) == 0)
2184 continue;
2185 vid = (i * 32) + j;
2186
2187 /* Call the shared code mailbox routine */
2188 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2189 FALSE)) != 0) {
2190 if (++retry > 5) {
2191 device_printf(adapter->dev,
2192 "%s: max retry exceeded\n",
2193 __func__);
2194 break;
2195 }
2196 }
2197 if (rv != 0) {
2198 device_printf(adapter->dev,
2199 "failed to set vlan %d\n", vid);
2200 error = EACCES;
2201 }
2202 }
2203 }
2204 return error;
2205 } /* ixv_setup_vlan_support */
2206
2207 static int
2208 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2209 {
2210 struct ifnet *ifp = &ec->ec_if;
2211 struct adapter *adapter = ifp->if_softc;
2212 int rv;
2213
2214 if (set)
2215 rv = ixv_register_vlan(adapter, vid);
2216 else
2217 rv = ixv_unregister_vlan(adapter, vid);
2218
2219 if (rv != 0)
2220 return rv;
2221
2222 /*
2223 	 * Control VLAN HW tagging when ec_nvlans is changed from 1 to 0
2224 	 * or from 0 to 1.
2225 */
2226 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2227 ixv_setup_vlan_tagging(adapter);
2228
2229 return rv;
2230 }
2231
2232 /************************************************************************
2233 * ixv_register_vlan
2234 *
2235 * Run via a vlan config EVENT, it enables us to use the
2236 * HW Filter table since we can get the vlan id. This just
2237 * creates the entry in the soft version of the VFTA, init
2238 * will repopulate the real table.
2239 ************************************************************************/
2240 static int
2241 ixv_register_vlan(struct adapter *adapter, u16 vtag)
2242 {
2243 struct ixgbe_hw *hw = &adapter->hw;
2244 u16 index, bit;
2245 int error;
2246
2247 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2248 return EINVAL;
2249 IXGBE_CORE_LOCK(adapter);
2250 index = (vtag >> 5) & 0x7F;
2251 bit = vtag & 0x1F;
2252 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2253 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2254 IXGBE_CORE_UNLOCK(adapter);
2255
2256 if (error != 0) {
2257 device_printf(adapter->dev, "failed to register vlan %hu\n",
2258 vtag);
2259 error = EACCES;
2260 }
2261 return error;
2262 } /* ixv_register_vlan */
2263
2264 /************************************************************************
2265 * ixv_unregister_vlan
2266 *
2267 * Run via a vlan unconfig EVENT, remove our entry
2268 * in the soft vfta.
2269 ************************************************************************/
2270 static int
2271 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2272 {
2273 struct ixgbe_hw *hw = &adapter->hw;
2274 u16 index, bit;
2275 int error;
2276
2277 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2278 return EINVAL;
2279
2280 IXGBE_CORE_LOCK(adapter);
2281 index = (vtag >> 5) & 0x7F;
2282 bit = vtag & 0x1F;
2283 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2284 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2285 IXGBE_CORE_UNLOCK(adapter);
2286
2287 if (error != 0) {
2288 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2289 vtag);
2290 error = EIO;
2291 }
2292 return error;
2293 } /* ixv_unregister_vlan */
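
/*
 * A minimal standalone sketch (compiled out) of the shadow VFTA bit math
 * used in the two functions above and in ixv_setup_vlan_support():
 * (vtag >> 5) & 0x7F and vtag & 0x1F are the same word/bit split as
 * vid / 32 and vid % 32.  Names are illustrative.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static void
vfta_update(uint32_t vfta[128], uint16_t vid, bool on)
{
	uint16_t index = (vid >> 5) & 0x7F;	/* == vid / 32 */
	uint16_t bit = vid & 0x1F;		/* == vid % 32 */

	if (on)
		vfta[index] |= (uint32_t)1 << bit;
	else
		vfta[index] &= ~((uint32_t)1 << bit);
}
#endif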
2294
2295 /************************************************************************
2296 * ixv_enable_intr
2297 ************************************************************************/
2298 static void
2299 ixv_enable_intr(struct adapter *adapter)
2300 {
2301 struct ixgbe_hw *hw = &adapter->hw;
2302 struct ix_queue *que = adapter->queues;
2303 u32 mask;
2304 int i;
2305
2306 /* For VTEIAC */
2307 mask = (1 << adapter->vector);
2308 for (i = 0; i < adapter->num_queues; i++, que++)
2309 mask |= (1 << que->msix);
2310 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2311
2312 /* For VTEIMS */
2313 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2314 que = adapter->queues;
2315 for (i = 0; i < adapter->num_queues; i++, que++)
2316 ixv_enable_queue(adapter, que->msix);
2317
2318 IXGBE_WRITE_FLUSH(hw);
2319 } /* ixv_enable_intr */
2320
2321 /************************************************************************
2322 * ixv_disable_intr
2323 ************************************************************************/
2324 static void
2325 ixv_disable_intr(struct adapter *adapter)
2326 {
2327 struct ix_queue *que = adapter->queues;
2328
2329 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2330
2331 /* disable interrupts other than queues */
2332 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, (1 << adapter->vector));
2333
2334 for (int i = 0; i < adapter->num_queues; i++, que++)
2335 ixv_disable_queue(adapter, que->msix);
2336
2337 IXGBE_WRITE_FLUSH(&adapter->hw);
2338 } /* ixv_disable_intr */
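
/*
 * A minimal standalone sketch (compiled out) of the mask layout used by
 * the two functions above: VTEIAC/VTEIMS/VTEIMC are bitmaps indexed by
 * MSI-X vector, so each queue contributes (1 << msix) and the mailbox
 * vector contributes one more bit.  Names are illustrative.
 */
#if 0
#include <stdint.h>

static uint32_t
vf_intr_mask(const unsigned *queue_msix, int nqueues, unsigned mbx_vector)
{
	uint32_t mask = 1U << mbx_vector;
	int i;

	for (i = 0; i < nqueues; i++)
		mask |= 1U << queue_msix[i];
	return mask;
}
#endif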
2339
2340 /************************************************************************
2341 * ixv_set_ivar
2342 *
2343 * Setup the correct IVAR register for a particular MSI-X interrupt
2344 * - entry is the register array entry
2345 * - vector is the MSI-X vector for this queue
2346 * - type is RX/TX/MISC
2347 ************************************************************************/
2348 static void
2349 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2350 {
2351 struct ixgbe_hw *hw = &adapter->hw;
2352 u32 ivar, index;
2353
2354 vector |= IXGBE_IVAR_ALLOC_VAL;
2355
2356 if (type == -1) { /* MISC IVAR */
2357 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2358 ivar &= ~0xFF;
2359 ivar |= vector;
2360 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2361 } else { /* RX/TX IVARS */
2362 index = (16 * (entry & 1)) + (8 * type);
2363 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2364 ivar &= ~(0xffUL << index);
2365 ivar |= ((u32)vector << index);
2366 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2367 }
2368 } /* ixv_set_ivar */
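
/*
 * A compiled-out sketch of the IVAR byte-lane math above: each 32-bit
 * VTIVAR register holds two queues' worth of 8-bit entries, with Rx
 * (type 0) in the low byte and Tx (type 1) one byte above it.
 */
#if 0
static unsigned
ivar_shift(unsigned entry, unsigned type)	/* type: 0 = Rx, 1 = Tx */
{
	/* entry 0: Rx -> 0, Tx -> 8; entry 1: Rx -> 16, Tx -> 24 */
	return (16 * (entry & 1)) + (8 * type);
}
#endif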
2369
2370 /************************************************************************
2371 * ixv_configure_ivars
2372 ************************************************************************/
2373 static void
2374 ixv_configure_ivars(struct adapter *adapter)
2375 {
2376 struct ix_queue *que = adapter->queues;
2377
2378 /* XXX We should sync EITR value calculation with ixgbe.c? */
2379
2380 for (int i = 0; i < adapter->num_queues; i++, que++) {
2381 /* First the RX queue entry */
2382 ixv_set_ivar(adapter, i, que->msix, 0);
2383 /* ... and the TX */
2384 ixv_set_ivar(adapter, i, que->msix, 1);
2385 /* Set an initial value in EITR */
2386 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2387 }
2388
2389 /* For the mailbox interrupt */
2390 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2391 } /* ixv_configure_ivars */
2392
2393
2394 /************************************************************************
2395 * ixv_save_stats
2396 *
2397 * The VF stats registers never have a truly virgin
2398 * starting point, so this routine tries to make an
2399  * artificial one, marking ground zero on attach,
2400  * as it were.
2401 ************************************************************************/
2402 static void
2403 ixv_save_stats(struct adapter *adapter)
2404 {
2405 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2406
2407 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2408 stats->saved_reset_vfgprc +=
2409 stats->vfgprc.ev_count - stats->base_vfgprc;
2410 stats->saved_reset_vfgptc +=
2411 stats->vfgptc.ev_count - stats->base_vfgptc;
2412 stats->saved_reset_vfgorc +=
2413 stats->vfgorc.ev_count - stats->base_vfgorc;
2414 stats->saved_reset_vfgotc +=
2415 stats->vfgotc.ev_count - stats->base_vfgotc;
2416 stats->saved_reset_vfmprc +=
2417 stats->vfmprc.ev_count - stats->base_vfmprc;
2418 }
2419 } /* ixv_save_stats */
2420
2421 /************************************************************************
2422 * ixv_init_stats
2423 ************************************************************************/
2424 static void
2425 ixv_init_stats(struct adapter *adapter)
2426 {
2427 struct ixgbe_hw *hw = &adapter->hw;
2428
2429 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2430 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2431 adapter->stats.vf.last_vfgorc |=
2432 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2433
2434 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2435 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2436 adapter->stats.vf.last_vfgotc |=
2437 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2438
2439 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2440
2441 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2442 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2443 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2444 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2445 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2446 } /* ixv_init_stats */
2447
2448 #define UPDATE_STAT_32(reg, last, count) \
2449 { \
2450 u32 current = IXGBE_READ_REG(hw, (reg)); \
2451 if (current < (last)) \
2452 count.ev_count += 0x100000000LL; \
2453 (last) = current; \
2454 count.ev_count &= 0xFFFFFFFF00000000LL; \
2455 count.ev_count |= current; \
2456 }
2457
2458 #define UPDATE_STAT_36(lsb, msb, last, count) \
2459 { \
2460 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2461 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2462 u64 current = ((cur_msb << 32) | cur_lsb); \
2463 if (current < (last)) \
2464 count.ev_count += 0x1000000000LL; \
2465 (last) = current; \
2466 count.ev_count &= 0xFFFFFFF000000000LL; \
2467 count.ev_count |= current; \
2468 }
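
/*
 * A compiled-out sketch of the wrap-extension scheme behind
 * UPDATE_STAT_32 above: the hardware counter is 32 bits and free
 * running, so a new reading below the previous one means a wrap, and
 * 2^32 is added to the 64-bit accumulator before its low half is
 * replaced.  UPDATE_STAT_36 is the same idea with a 36-bit counter and
 * a 2^36 step.
 */
#if 0
#include <stdint.h>

static void
update_stat_32(uint32_t cur, uint32_t *last, uint64_t *count)
{
	if (cur < *last)			/* 32-bit wrap */
		*count += 0x100000000ULL;
	*last = cur;
	*count = (*count & 0xFFFFFFFF00000000ULL) | cur;
}
#endif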
2469
2470 /************************************************************************
2471 * ixv_update_stats - Update the board statistics counters.
2472 ************************************************************************/
2473 void
2474 ixv_update_stats(struct adapter *adapter)
2475 {
2476 struct ixgbe_hw *hw = &adapter->hw;
2477 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2478
2479 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2480 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2481 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2482 stats->vfgorc);
2483 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2484 stats->vfgotc);
2485 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2486
2487 	/* The VF doesn't count errors in hardware */
2488
2489 } /* ixv_update_stats */
2490
2491 /************************************************************************
2492 * ixv_sysctl_interrupt_rate_handler
2493 ************************************************************************/
2494 static int
2495 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2496 {
2497 	struct sysctlnode node = *rnode;
2498 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2499 	struct adapter *adapter;
2500 	uint32_t reg, usec, rate;
2501 	int error;
2502 	if (que == NULL)
2503 		return 0;
2504 	adapter = que->adapter;
2505 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_VTEITR(que->msix));
2506 usec = ((reg & 0x0FF8) >> 3);
2507 if (usec > 0)
2508 rate = 500000 / usec;
2509 else
2510 rate = 0;
2511 node.sysctl_data = &rate;
2512 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2513 if (error || newp == NULL)
2514 return error;
2515 reg &= ~0xfff; /* default, no limitation */
2516 if (rate > 0 && rate < 500000) {
2517 if (rate < 1000)
2518 rate = 1000;
2519 reg |= ((4000000 / rate) & 0xff8);
2520 /*
2521 		 * When RSC is used, the ITR interval must be larger than
2522 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2523 		 * The minimum value is always greater than 2us on 100M
2524 		 * (and 10M? (not documented)), but not on 1G and higher.
2525 */
2526 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2527 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2528 if ((adapter->num_queues > 1)
2529 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2530 return EINVAL;
2531 }
2532 ixv_max_interrupt_rate = rate;
2533 } else
2534 ixv_max_interrupt_rate = 0;
2535 ixv_eitr_write(adapter, que->msix, reg);
2536
2537 return (0);
2538 } /* ixv_sysctl_interrupt_rate_handler */
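
/*
 * A compiled-out sketch of the VTEITR conversion used above: the
 * interval lives in bits 11:3 and counts 2us units, so a field value of
 * 1 corresponds to 500000 interrupts/s and the encodings below
 * round-trip (e.g. 1000 intr/s <-> 0xfa0).  Names are illustrative.
 */
#if 0
#include <stdint.h>

static uint32_t
rate_to_eitr(uint32_t intrs_per_sec)	/* 0 = no limit */
{
	if (intrs_per_sec == 0)
		return 0;
	return (4000000 / intrs_per_sec) & 0xff8;
}

static uint32_t
eitr_to_rate(uint32_t reg)
{
	uint32_t ival = (reg & 0x0ff8) >> 3;	/* 2us units */

	return (ival != 0) ? 500000 / ival : 0;
}
#endif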
2539
2540 const struct sysctlnode *
2541 ixv_sysctl_instance(struct adapter *adapter)
2542 {
2543 const char *dvname;
2544 struct sysctllog **log;
2545 int rc;
2546 const struct sysctlnode *rnode;
2547
2548 log = &adapter->sysctllog;
2549 dvname = device_xname(adapter->dev);
2550
2551 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2552 0, CTLTYPE_NODE, dvname,
2553 SYSCTL_DESCR("ixv information and settings"),
2554 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2555 goto err;
2556
2557 return rnode;
2558 err:
2559 device_printf(adapter->dev,
2560 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2561 return NULL;
2562 }
2563
2564 static void
2565 ixv_add_device_sysctls(struct adapter *adapter)
2566 {
2567 struct sysctllog **log;
2568 const struct sysctlnode *rnode, *cnode;
2569 device_t dev;
2570
2571 dev = adapter->dev;
2572 log = &adapter->sysctllog;
2573
2574 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2575 aprint_error_dev(dev, "could not create sysctl root\n");
2576 return;
2577 }
2578
2579 if (sysctl_createv(log, 0, &rnode, &cnode,
2580 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
2581 SYSCTL_DESCR("Debug Info"),
2582 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2583 aprint_error_dev(dev, "could not create sysctl\n");
2584
2585 if (sysctl_createv(log, 0, &rnode, &cnode,
2586 CTLFLAG_READWRITE, CTLTYPE_INT,
2587 "rx_copy_len", SYSCTL_DESCR("RX Copy Length"),
2588 ixv_sysctl_rx_copy_len, 0,
2589 (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2590 aprint_error_dev(dev, "could not create sysctl\n");
2591
2592 if (sysctl_createv(log, 0, &rnode, &cnode,
2593 CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
2594 SYSCTL_DESCR("Interrupt Moderation"),
2595 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2596 aprint_error_dev(dev, "could not create sysctl\n");
2597
2598 if (sysctl_createv(log, 0, &rnode, &cnode,
2599 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2600 SYSCTL_DESCR("Use workqueue for packet processing"),
2601 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
2602 != 0)
2603 aprint_error_dev(dev, "could not create sysctl\n");
2604 }
2605
2606 /************************************************************************
2607 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2608 ************************************************************************/
2609 static void
2610 ixv_add_stats_sysctls(struct adapter *adapter)
2611 {
2612 device_t dev = adapter->dev;
2613 struct tx_ring *txr = adapter->tx_rings;
2614 struct rx_ring *rxr = adapter->rx_rings;
2615 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2616 struct ixgbe_hw *hw = &adapter->hw;
2617 const struct sysctlnode *rnode, *cnode;
2618 struct sysctllog **log = &adapter->sysctllog;
2619 const char *xname = device_xname(dev);
2620
2621 /* Driver Statistics */
2622 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2623 NULL, xname, "Driver tx dma soft fail EFBIG");
2624 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2625 NULL, xname, "m_defrag() failed");
2626 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2627 NULL, xname, "Driver tx dma hard fail EFBIG");
2628 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2629 NULL, xname, "Driver tx dma hard fail EINVAL");
2630 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2631 NULL, xname, "Driver tx dma hard fail other");
2632 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2633 NULL, xname, "Driver tx dma soft fail EAGAIN");
2634 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2635 NULL, xname, "Driver tx dma soft fail ENOMEM");
2636 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2637 NULL, xname, "Watchdog timeouts");
2638 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2639 NULL, xname, "TSO errors");
2640 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
2641 NULL, xname, "Admin MSI-X IRQ Handled");
2642 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
2643 NULL, xname, "Admin event");
2644
2645 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2646 snprintf(adapter->queues[i].evnamebuf,
2647 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2648 xname, i);
2649 snprintf(adapter->queues[i].namebuf,
2650 sizeof(adapter->queues[i].namebuf), "q%d", i);
2651
2652 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2653 aprint_error_dev(dev, "could not create sysctl root\n");
2654 break;
2655 }
2656
2657 if (sysctl_createv(log, 0, &rnode, &rnode,
2658 0, CTLTYPE_NODE,
2659 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2660 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2661 break;
2662
2663 if (sysctl_createv(log, 0, &rnode, &cnode,
2664 CTLFLAG_READWRITE, CTLTYPE_INT,
2665 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2666 ixv_sysctl_interrupt_rate_handler, 0,
2667 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2668 break;
2669
2670 if (sysctl_createv(log, 0, &rnode, &cnode,
2671 CTLFLAG_READONLY, CTLTYPE_INT,
2672 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2673 ixv_sysctl_tdh_handler, 0, (void *)txr,
2674 0, CTL_CREATE, CTL_EOL) != 0)
2675 break;
2676
2677 if (sysctl_createv(log, 0, &rnode, &cnode,
2678 CTLFLAG_READONLY, CTLTYPE_INT,
2679 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2680 ixv_sysctl_tdt_handler, 0, (void *)txr,
2681 0, CTL_CREATE, CTL_EOL) != 0)
2682 break;
2683
2684 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2685 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2686 evcnt_attach_dynamic(&adapter->queues[i].handleq,
2687 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
2688 "Handled queue in softint");
2689 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
2690 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
2691 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2692 NULL, adapter->queues[i].evnamebuf, "TSO");
2693 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2694 NULL, adapter->queues[i].evnamebuf,
2695 "TX Queue No Descriptor Available");
2696 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2697 NULL, adapter->queues[i].evnamebuf,
2698 "Queue Packets Transmitted");
2699 #ifndef IXGBE_LEGACY_TX
2700 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2701 NULL, adapter->queues[i].evnamebuf,
2702 "Packets dropped in pcq");
2703 #endif
2704
2705 #ifdef LRO
2706 struct lro_ctrl *lro = &rxr->lro;
2707 #endif /* LRO */
2708
2709 if (sysctl_createv(log, 0, &rnode, &cnode,
2710 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
2711 SYSCTL_DESCR("Receive Descriptor next to check"),
2712 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2713 CTL_CREATE, CTL_EOL) != 0)
2714 break;
2715
2716 if (sysctl_createv(log, 0, &rnode, &cnode,
2717 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
2718 SYSCTL_DESCR("Receive Descriptor next to refresh"),
2719 ixv_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
2720 CTL_CREATE, CTL_EOL) != 0)
2721 break;
2722
2723 if (sysctl_createv(log, 0, &rnode, &cnode,
2724 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
2725 SYSCTL_DESCR("Receive Descriptor Head"),
2726 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2727 CTL_CREATE, CTL_EOL) != 0)
2728 break;
2729
2730 if (sysctl_createv(log, 0, &rnode, &cnode,
2731 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
2732 SYSCTL_DESCR("Receive Descriptor Tail"),
2733 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2734 CTL_CREATE, CTL_EOL) != 0)
2735 break;
2736
2737 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2738 NULL, adapter->queues[i].evnamebuf,
2739 "Queue Packets Received");
2740 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2741 NULL, adapter->queues[i].evnamebuf,
2742 "Queue Bytes Received");
2743 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2744 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2745 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2746 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2747 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2748 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2749 #ifdef LRO
2750 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2751 CTLFLAG_RD, &lro->lro_queued, 0,
2752 "LRO Queued");
2753 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2754 CTLFLAG_RD, &lro->lro_flushed, 0,
2755 "LRO Flushed");
2756 #endif /* LRO */
2757 }
2758
2759 /* MAC stats get their own sub node */
2760
2761 snprintf(stats->namebuf,
2762 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2763
2764 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2765 stats->namebuf, "rx csum offload - IP");
2766 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2767 stats->namebuf, "rx csum offload - L4");
2768 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2769 stats->namebuf, "rx csum offload - IP bad");
2770 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2771 stats->namebuf, "rx csum offload - L4 bad");
2772
2773 /* Packet Reception Stats */
2774 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2775 xname, "Good Packets Received");
2776 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2777 xname, "Good Octets Received");
2778 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2779 xname, "Multicast Packets Received");
2780 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2781 xname, "Good Packets Transmitted");
2782 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2783 xname, "Good Octets Transmitted");
2784
2785 /* Mailbox Stats */
2786 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2787 xname, "message TXs");
2788 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2789 xname, "message RXs");
2790 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2791 xname, "ACKs");
2792 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2793 xname, "REQs");
2794 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2795 xname, "RSTs");
2796
2797 } /* ixv_add_stats_sysctls */
2798
2799 static void
2800 ixv_clear_evcnt(struct adapter *adapter)
2801 {
2802 struct tx_ring *txr = adapter->tx_rings;
2803 struct rx_ring *rxr = adapter->rx_rings;
2804 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2805 struct ixgbe_hw *hw = &adapter->hw;
2806 int i;
2807
2808 /* Driver Statistics */
2809 adapter->efbig_tx_dma_setup.ev_count = 0;
2810 adapter->mbuf_defrag_failed.ev_count = 0;
2811 adapter->efbig2_tx_dma_setup.ev_count = 0;
2812 adapter->einval_tx_dma_setup.ev_count = 0;
2813 adapter->other_tx_dma_setup.ev_count = 0;
2814 adapter->eagain_tx_dma_setup.ev_count = 0;
2815 adapter->enomem_tx_dma_setup.ev_count = 0;
2816 adapter->watchdog_events.ev_count = 0;
2817 adapter->tso_err.ev_count = 0;
2818 adapter->admin_irqev.ev_count = 0;
2819 adapter->link_workev.ev_count = 0;
2820
2821 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2822 adapter->queues[i].irqs.ev_count = 0;
2823 adapter->queues[i].handleq.ev_count = 0;
2824 adapter->queues[i].req.ev_count = 0;
2825 txr->tso_tx.ev_count = 0;
2826 txr->no_desc_avail.ev_count = 0;
2827 txr->total_packets.ev_count = 0;
2828 #ifndef IXGBE_LEGACY_TX
2829 txr->pcq_drops.ev_count = 0;
2830 #endif
2831 txr->q_efbig_tx_dma_setup = 0;
2832 txr->q_mbuf_defrag_failed = 0;
2833 txr->q_efbig2_tx_dma_setup = 0;
2834 txr->q_einval_tx_dma_setup = 0;
2835 txr->q_other_tx_dma_setup = 0;
2836 txr->q_eagain_tx_dma_setup = 0;
2837 txr->q_enomem_tx_dma_setup = 0;
2838 txr->q_tso_err = 0;
2839
2840 rxr->rx_packets.ev_count = 0;
2841 rxr->rx_bytes.ev_count = 0;
2842 rxr->rx_copies.ev_count = 0;
2843 rxr->no_jmbuf.ev_count = 0;
2844 rxr->rx_discarded.ev_count = 0;
2845 }
2846
2847 /* MAC stats get their own sub node */
2848
2849 stats->ipcs.ev_count = 0;
2850 stats->l4cs.ev_count = 0;
2851 stats->ipcs_bad.ev_count = 0;
2852 stats->l4cs_bad.ev_count = 0;
2853
2854 /* Packet Reception Stats */
2855 stats->vfgprc.ev_count = 0;
2856 stats->vfgorc.ev_count = 0;
2857 stats->vfmprc.ev_count = 0;
2858 stats->vfgptc.ev_count = 0;
2859 stats->vfgotc.ev_count = 0;
2860
2861 /* Mailbox Stats */
2862 hw->mbx.stats.msgs_tx.ev_count = 0;
2863 hw->mbx.stats.msgs_rx.ev_count = 0;
2864 hw->mbx.stats.acks.ev_count = 0;
2865 hw->mbx.stats.reqs.ev_count = 0;
2866 hw->mbx.stats.rsts.ev_count = 0;
2867
2868 } /* ixv_clear_evcnt */
2869
2870 /************************************************************************
2871 * ixv_set_sysctl_value
2872 ************************************************************************/
2873 static void
2874 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2875 const char *description, int *limit, int value)
2876 {
2877 device_t dev = adapter->dev;
2878 struct sysctllog **log;
2879 const struct sysctlnode *rnode, *cnode;
2880
2881 log = &adapter->sysctllog;
2882 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2883 aprint_error_dev(dev, "could not create sysctl root\n");
2884 return;
2885 }
2886 if (sysctl_createv(log, 0, &rnode, &cnode,
2887 CTLFLAG_READWRITE, CTLTYPE_INT,
2888 name, SYSCTL_DESCR(description),
2889 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2890 aprint_error_dev(dev, "could not create sysctl\n");
2891 *limit = value;
2892 } /* ixv_set_sysctl_value */
2893
2894 /************************************************************************
2895 * ixv_print_debug_info
2896 *
2897  * Called only when the "debug" sysctl is set to 1.
2898 * Provides a way to take a look at important statistics
2899 * maintained by the driver and hardware.
2900 ************************************************************************/
2901 static void
2902 ixv_print_debug_info(struct adapter *adapter)
2903 {
2904 device_t dev = adapter->dev;
2905 struct ix_queue *que = adapter->queues;
2906 struct rx_ring *rxr;
2907 struct tx_ring *txr;
2908 #ifdef LRO
2909 struct lro_ctrl *lro;
2910 #endif /* LRO */
2911
2912 for (int i = 0; i < adapter->num_queues; i++, que++) {
2913 txr = que->txr;
2914 rxr = que->rxr;
2915 #ifdef LRO
2916 lro = &rxr->lro;
2917 #endif /* LRO */
2918 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2919 		    que->msix, (unsigned long)que->irqs.ev_count);
2920 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
2921 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
2922 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2923 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
2924 #ifdef LRO
2925 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2926 rxr->me, (uintmax_t)lro->lro_queued);
2927 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2928 rxr->me, (uintmax_t)lro->lro_flushed);
2929 #endif /* LRO */
2930 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2931 		    txr->me, (unsigned long)txr->total_packets.ev_count);
2932 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2933 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
2934 }
2935
2936 	device_printf(dev, "Admin IRQ Handled: %lu\n",
2937 	    (unsigned long)adapter->admin_irqev.ev_count);
2938 	device_printf(dev, "Admin work Handled: %lu\n",
2939 	    (unsigned long)adapter->link_workev.ev_count);
2940 } /* ixv_print_debug_info */
2941
2942 /************************************************************************
2943 * ixv_sysctl_debug
2944 ************************************************************************/
2945 static int
2946 ixv_sysctl_debug(SYSCTLFN_ARGS)
2947 {
2948 struct sysctlnode node = *rnode;
2949 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2950 int error, result;
2951
2952 node.sysctl_data = &result;
2953 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2954
2955 if (error || newp == NULL)
2956 return error;
2957
2958 if (result == 1)
2959 ixv_print_debug_info(adapter);
2960
2961 return 0;
2962 } /* ixv_sysctl_debug */
2963
2964 /************************************************************************
2965 * ixv_sysctl_rx_copy_len
2966 ************************************************************************/
2967 static int
2968 ixv_sysctl_rx_copy_len(SYSCTLFN_ARGS)
2969 {
2970 struct sysctlnode node = *rnode;
2971 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2972 int error;
2973 int result = adapter->rx_copy_len;
2974
2975 node.sysctl_data = &result;
2976 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2977
2978 if (error || newp == NULL)
2979 return error;
2980
2981 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
2982 return EINVAL;
2983
2984 adapter->rx_copy_len = result;
2985
2986 return 0;
2987 } /* ixv_sysctl_rx_copy_len */
2988
2989 /************************************************************************
2990 * ixv_init_device_features
2991 ************************************************************************/
2992 static void
2993 ixv_init_device_features(struct adapter *adapter)
2994 {
2995 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2996 | IXGBE_FEATURE_VF
2997 | IXGBE_FEATURE_RSS
2998 | IXGBE_FEATURE_LEGACY_TX;
2999
3000 	/* A tad short on feature flags for VFs, at the moment. */
3001 switch (adapter->hw.mac.type) {
3002 case ixgbe_mac_82599_vf:
3003 break;
3004 case ixgbe_mac_X540_vf:
3005 break;
3006 case ixgbe_mac_X550_vf:
3007 case ixgbe_mac_X550EM_x_vf:
3008 case ixgbe_mac_X550EM_a_vf:
3009 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
3010 break;
3011 default:
3012 break;
3013 }
3014
3015 /* Enabled by default... */
3016 /* Is a virtual function (VF) */
3017 if (adapter->feat_cap & IXGBE_FEATURE_VF)
3018 adapter->feat_en |= IXGBE_FEATURE_VF;
3019 /* Netmap */
3020 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
3021 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
3022 /* Receive-Side Scaling (RSS) */
3023 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
3024 adapter->feat_en |= IXGBE_FEATURE_RSS;
3025 /* Needs advanced context descriptor regardless of offloads req'd */
3026 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
3027 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
3028
3029 /* Enabled via sysctl... */
3030 /* Legacy (single queue) transmit */
3031 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
3032 ixv_enable_legacy_tx)
3033 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
3034 } /* ixv_init_device_features */
3035
3036 /************************************************************************
3037 * ixv_shutdown - Shutdown entry point
3038 ************************************************************************/
3039 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3040 static int
3041 ixv_shutdown(device_t dev)
3042 {
3043 struct adapter *adapter = device_private(dev);
3044 IXGBE_CORE_LOCK(adapter);
3045 ixv_stop_locked(adapter);
3046 IXGBE_CORE_UNLOCK(adapter);
3047
3048 return (0);
3049 } /* ixv_shutdown */
3050 #endif
3051
3052 static int
3053 ixv_ifflags_cb(struct ethercom *ec)
3054 {
3055 struct ifnet *ifp = &ec->ec_if;
3056 struct adapter *adapter = ifp->if_softc;
3057 u_short saved_flags;
3058 u_short change;
3059 int rv = 0;
3060
3061 IXGBE_CORE_LOCK(adapter);
3062
3063 saved_flags = adapter->if_flags;
3064 change = ifp->if_flags ^ adapter->if_flags;
3065 if (change != 0)
3066 adapter->if_flags = ifp->if_flags;
3067
3068 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3069 rv = ENETRESET;
3070 goto out;
3071 } else if ((change & IFF_PROMISC) != 0) {
3072 rv = ixv_set_rxfilter(adapter);
3073 if (rv != 0) {
3074 /* Restore previous */
3075 adapter->if_flags = saved_flags;
3076 goto out;
3077 }
3078 }
3079
3080 /* Check for ec_capenable. */
3081 change = ec->ec_capenable ^ adapter->ec_capenable;
3082 adapter->ec_capenable = ec->ec_capenable;
3083 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
3084 | ETHERCAP_VLAN_HWFILTER)) != 0) {
3085 rv = ENETRESET;
3086 goto out;
3087 }
3088
3089 /*
3090 * Special handling is not required for ETHERCAP_VLAN_MTU.
3091 	 * PF's MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
3092 */
3093
3094 /* Set up VLAN support and filter */
3095 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
3096 rv = ixv_setup_vlan_support(adapter);
3097
3098 out:
3099 IXGBE_CORE_UNLOCK(adapter);
3100
3101 return rv;
3102 }
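
/*
 * A compiled-out sketch of the change detection used above: XORing the
 * new and cached flag words leaves exactly the changed bits, which are
 * then tested against the set that can be handled without a full
 * reinit.  Names are illustrative.
 */
#if 0
#include <stdbool.h>

static bool
needs_reinit(unsigned newflags, unsigned oldflags, unsigned handled)
{
	unsigned change = newflags ^ oldflags;	/* bits that differ */

	return (change & ~handled) != 0;
}
#endif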
3103
3104
3105 /************************************************************************
3106 * ixv_ioctl - Ioctl entry point
3107 *
3108 * Called when the user wants to configure the interface.
3109 *
3110 * return 0 on success, positive on failure
3111 ************************************************************************/
3112 static int
3113 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
3114 {
3115 struct adapter *adapter = ifp->if_softc;
3116 struct ixgbe_hw *hw = &adapter->hw;
3117 struct ifcapreq *ifcr = data;
3118 int error;
3119 int l4csum_en;
3120 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3121 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3122
3123 switch (command) {
3124 case SIOCSIFFLAGS:
3125 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
3126 break;
3127 case SIOCADDMULTI: {
3128 struct ether_multi *enm;
3129 struct ether_multistep step;
3130 struct ethercom *ec = &adapter->osdep.ec;
3131 bool overflow = false;
3132 int mcnt = 0;
3133
3134 /*
3135 		 * Check the number of multicast addresses. If it exceeds
3136 		 * the limit, return ENOSPC.
3137 * Update this code when we support API 1.3.
3138 */
3139 ETHER_LOCK(ec);
3140 ETHER_FIRST_MULTI(step, ec, enm);
3141 while (enm != NULL) {
3142 mcnt++;
3143
3144 /*
3145 			 * This check runs before the address is added, so
3146 			 * at least one free slot must remain.
3147 */
3148 if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3149 overflow = true;
3150 break;
3151 }
3152 ETHER_NEXT_MULTI(step, enm);
3153 }
3154 ETHER_UNLOCK(ec);
3155 error = 0;
3156 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3157 error = hw->mac.ops.update_xcast_mode(hw,
3158 IXGBEVF_XCAST_MODE_ALLMULTI);
3159 if (error == IXGBE_ERR_NOT_TRUSTED) {
3160 device_printf(adapter->dev,
3161 "this interface is not trusted\n");
3162 error = EPERM;
3163 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3164 device_printf(adapter->dev,
3165 "the PF doesn't support allmulti mode\n");
3166 error = EOPNOTSUPP;
3167 } else if (error) {
3168 device_printf(adapter->dev,
3169 "number of Ethernet multicast addresses "
3170 "exceeds the limit (%d). error = %d\n",
3171 IXGBE_MAX_VF_MC, error);
3172 error = ENOSPC;
3173 } else
3174 ec->ec_flags |= ETHER_F_ALLMULTI;
3175 }
3176 if (error)
3177 return error;
3178 }
3179 /*FALLTHROUGH*/
3180 case SIOCDELMULTI:
3181 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3182 break;
3183 case SIOCSIFMEDIA:
3184 case SIOCGIFMEDIA:
3185 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3186 break;
3187 case SIOCSIFCAP:
3188 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3189 break;
3190 case SIOCSIFMTU:
3191 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3192 break;
3193 case SIOCZIFDATA:
3194 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3195 ixv_update_stats(adapter);
3196 ixv_clear_evcnt(adapter);
3197 break;
3198 default:
3199 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3200 break;
3201 }
3202
3203 switch (command) {
3204 case SIOCSIFCAP:
3205 /* Layer-4 Rx checksum offload has to be turned on and
3206 * off as a unit.
3207 */
3208 l4csum_en = ifcr->ifcr_capenable & l4csum;
3209 if (l4csum_en != l4csum && l4csum_en != 0)
3210 return EINVAL;
3211 /*FALLTHROUGH*/
3212 case SIOCADDMULTI:
3213 case SIOCDELMULTI:
3214 case SIOCSIFFLAGS:
3215 case SIOCSIFMTU:
3216 default:
3217 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3218 return error;
3219 if ((ifp->if_flags & IFF_RUNNING) == 0)
3220 ;
3221 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3222 IXGBE_CORE_LOCK(adapter);
3223 ixv_init_locked(adapter);
3224 IXGBE_CORE_UNLOCK(adapter);
3225 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3226 /*
3227 * Multicast list has changed; set the hardware filter
3228 * accordingly.
3229 */
3230 IXGBE_CORE_LOCK(adapter);
3231 ixv_disable_intr(adapter);
3232 ixv_set_rxfilter(adapter);
3233 ixv_enable_intr(adapter);
3234 IXGBE_CORE_UNLOCK(adapter);
3235 }
3236 return 0;
3237 }
3238 } /* ixv_ioctl */
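
/*
 * A compiled-out sketch of the all-or-nothing test in the SIOCSIFCAP
 * case above: the four layer-4 Rx checksum capabilities must be
 * requested either all together or not at all.
 */
#if 0
static int
l4csum_request_ok(int capenable, int l4csum_bits)
{
	int l4 = capenable & l4csum_bits;

	return (l4 == 0) || (l4 == l4csum_bits);
}
#endif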
3239
3240 /************************************************************************
3241 * ixv_init
3242 ************************************************************************/
3243 static int
3244 ixv_init(struct ifnet *ifp)
3245 {
3246 struct adapter *adapter = ifp->if_softc;
3247
3248 IXGBE_CORE_LOCK(adapter);
3249 ixv_init_locked(adapter);
3250 IXGBE_CORE_UNLOCK(adapter);
3251
3252 return 0;
3253 } /* ixv_init */
3254
3255 /************************************************************************
3256 * ixv_handle_que
3257 ************************************************************************/
3258 static void
3259 ixv_handle_que(void *context)
3260 {
3261 struct ix_queue *que = context;
3262 struct adapter *adapter = que->adapter;
3263 struct tx_ring *txr = que->txr;
3264 struct ifnet *ifp = adapter->ifp;
3265 bool more;
3266
3267 que->handleq.ev_count++;
3268
3269 if (ifp->if_flags & IFF_RUNNING) {
3270 more = ixgbe_rxeof(que);
3271 IXGBE_TX_LOCK(txr);
3272 more |= ixgbe_txeof(txr);
3273 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
3274 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3275 ixgbe_mq_start_locked(ifp, txr);
3276 /* Only for queue 0 */
3277 /* NetBSD still needs this for CBQ */
3278 if ((&adapter->queues[0] == que)
3279 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3280 ixgbe_legacy_start_locked(ifp, txr);
3281 IXGBE_TX_UNLOCK(txr);
3282 if (more) {
3283 que->req.ev_count++;
3284 if (adapter->txrx_use_workqueue) {
3285 /*
3286 * "enqueued flag" is not required here
3287 * the same as ixg(4). See ixgbe_msix_que().
3288 */
3289 workqueue_enqueue(adapter->que_wq,
3290 &que->wq_cookie, curcpu());
3291 } else
3292 softint_schedule(que->que_si);
3293 return;
3294 }
3295 }
3296
3297 /* Re-enable this interrupt */
3298 ixv_enable_queue(adapter, que->msix);
3299
3300 return;
3301 } /* ixv_handle_que */
3302
3303 /************************************************************************
3304 * ixv_handle_que_work
3305 ************************************************************************/
3306 static void
3307 ixv_handle_que_work(struct work *wk, void *context)
3308 {
3309 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3310
3311 /*
3312 * "enqueued flag" is not required here the same as ixg(4).
3313 * See ixgbe_msix_que().
3314 */
3315 ixv_handle_que(que);
3316 }
3317
3318 /************************************************************************
3319 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3320 ************************************************************************/
3321 static int
3322 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
3323 {
3324 device_t dev = adapter->dev;
3325 struct ix_queue *que = adapter->queues;
3326 struct tx_ring *txr = adapter->tx_rings;
3327 int error, msix_ctrl, rid, vector = 0;
3328 pci_chipset_tag_t pc;
3329 pcitag_t tag;
3330 char intrbuf[PCI_INTRSTR_LEN];
3331 char wqname[MAXCOMLEN];
3332 char intr_xname[32];
3333 const char *intrstr = NULL;
3334 kcpuset_t *affinity;
3335 int cpu_id = 0;
3336
3337 pc = adapter->osdep.pc;
3338 tag = adapter->osdep.tag;
3339
3340 adapter->osdep.nintrs = adapter->num_queues + 1;
3341 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3342 adapter->osdep.nintrs) != 0) {
3343 aprint_error_dev(dev,
3344 "failed to allocate MSI-X interrupt\n");
3345 return (ENXIO);
3346 }
3347
3348 kcpuset_create(&affinity, false);
3349 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3350 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3351 device_xname(dev), i);
3352 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3353 sizeof(intrbuf));
3354 #ifdef IXGBE_MPSAFE
3355 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3356 true);
3357 #endif
3358 /* Set the handler function */
3359 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3360 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3361 intr_xname);
3362 if (que->res == NULL) {
3363 pci_intr_release(pc, adapter->osdep.intrs,
3364 adapter->osdep.nintrs);
3365 aprint_error_dev(dev,
3366 "Failed to register QUE handler\n");
3367 kcpuset_destroy(affinity);
3368 return (ENXIO);
3369 }
3370 que->msix = vector;
3371 		adapter->active_queues |= (u64)1 << que->msix;
3372
3373 cpu_id = i;
3374 /* Round-robin affinity */
3375 kcpuset_zero(affinity);
3376 kcpuset_set(affinity, cpu_id % ncpu);
3377 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3378 NULL);
3379 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3380 intrstr);
3381 if (error == 0)
3382 aprint_normal(", bound queue %d to cpu %d\n",
3383 i, cpu_id % ncpu);
3384 else
3385 aprint_normal("\n");
3386
3387 #ifndef IXGBE_LEGACY_TX
3388 txr->txr_si
3389 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3390 ixgbe_deferred_mq_start, txr);
3391 #endif
3392 que->que_si
3393 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3394 ixv_handle_que, que);
3395 if (que->que_si == NULL) {
3396 aprint_error_dev(dev,
3397 "could not establish software interrupt\n");
3398 }
3399 }
3400 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3401 error = workqueue_create(&adapter->txr_wq, wqname,
3402 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3403 IXGBE_WORKQUEUE_FLAGS);
3404 if (error) {
3405 aprint_error_dev(dev,
3406 "couldn't create workqueue for deferred Tx\n");
3407 }
3408 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3409
3410 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3411 error = workqueue_create(&adapter->que_wq, wqname,
3412 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3413 IXGBE_WORKQUEUE_FLAGS);
3414 if (error) {
3415 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
3416 }
3417
3418 /* and Mailbox */
3419 cpu_id++;
3420 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3421 adapter->vector = vector;
3422 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3423 sizeof(intrbuf));
3424 #ifdef IXGBE_MPSAFE
3425 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3426 true);
3427 #endif
3428 /* Set the mbx handler function */
3429 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3430 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3431 intr_xname);
3432 if (adapter->osdep.ihs[vector] == NULL) {
3433 aprint_error_dev(dev, "Failed to register LINK handler\n");
3434 kcpuset_destroy(affinity);
3435 return (ENXIO);
3436 }
3437 /* Round-robin affinity */
3438 kcpuset_zero(affinity);
3439 kcpuset_set(affinity, cpu_id % ncpu);
3440 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
3441 NULL);
3442
3443 aprint_normal_dev(dev,
3444 "for link, interrupting at %s", intrstr);
3445 if (error == 0)
3446 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3447 else
3448 aprint_normal("\n");
3449
3450 	/* Workqueue for the admin/mailbox task */
3451 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
3452 error = workqueue_create(&adapter->admin_wq, wqname,
3453 ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3454 IXGBE_TASKLET_WQ_FLAGS);
3455 if (error) {
3456 aprint_error_dev(dev,
3457 "could not create admin workqueue (%d)\n", error);
3458 goto err_out;
3459 }

	/*
	 * Due to a broken design, QEMU will fail to properly enable the
	 * guest for MSI-X unless the vectors in the table are all set up,
	 * so we must rewrite the ENABLE bit in the MSI-X control register
	 * again at this point to cause it to successfully initialize us.
	 */
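	/*
	 * (PCI_MSIX_CTL below is the offset of the dword holding the
	 * MSI-X Message Control word within the capability located by
	 * pci_get_capability(), and PCI_MSIX_CTL_ENABLE is the enable
	 * bit within that dword.)
	 */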
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	kcpuset_destroy(affinity);
	return (0);

err_out:
	kcpuset_destroy(affinity);
	ixv_free_deferred_handlers(adapter);
	ixv_free_pci_resources(adapter);
	return (error);
} /* ixv_allocate_msix */

/************************************************************************
 * ixv_configure_interrupts - Setup MSI-X resources
 *
 * Note: The VF device MUST use MSI-X; there is no fallback.
 ************************************************************************/
static int
ixv_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int want, queues, msgs;

	/* Must have at least 2 MSI-X vectors */
	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
	if (msgs < 2) {
		aprint_error_dev(dev, "MSI-X config error\n");
		return (ENXIO);
	}
	msgs = MIN(msgs, IXG_MAX_NINTR);

	/* Figure out a reasonable auto config value */
	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;

	if (ixv_num_queues != 0)
		queues = ixv_num_queues;
	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
		queues = IXGBE_VF_MAX_TX_QUEUES;

	/*
	 * Want vectors for the queues,
	 * plus an additional for mailbox.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		aprint_error_dev(dev,
		    "MSI-X Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		return (ENXIO);
	}

	adapter->msix_mem = (void *)1; /* XXX */
	aprint_normal_dev(dev,
	    "Using MSI-X interrupts with %d vectors\n", msgs);
	adapter->num_queues = queues;

	return (0);
} /* ixv_configure_interrupts */
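
/*
 * Worked example of the vector accounting above: on an 8-CPU host whose
 * VF exposes 3 MSI-X messages, with ixv_num_queues left at 0, we get
 * queues = MIN(ncpu, msgs - 1) = 2 and want = queues + 1 = 3, so all
 * three vectors are used: two for Tx/Rx queues and one for the mailbox.
 * (Illustrative numbers only; the real counts come from the device.)
 */
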
/************************************************************************
 * ixv_handle_admin - Workqueue handler for MSI-X MBX interrupts
 *
 * Done outside of interrupt context since the driver might sleep
 ************************************************************************/
static void
ixv_handle_admin(struct work *wk, void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	++adapter->link_workev.ev_count;
	hw->mac.ops.check_link(hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);
	ixv_update_link_status(adapter);

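	/*
	 * Clearing task_requests and admin_pending lets the interrupt
	 * handler schedule this work again; admin_pending is the flag it
	 * tests before enqueueing, so it must be reset before the mailbox
	 * interrupt is unmasked below.
	 */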
	adapter->task_requests = 0;
	atomic_store_relaxed(&adapter->admin_pending, 0);

	/* Re-enable the mailbox/link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_admin */

/************************************************************************
 * ixv_check_link - Used in the local timer to poll for link changes
 ************************************************************************/
static s32
ixv_check_link(struct adapter *adapter)
{
	s32 error;

	KASSERT(mutex_owned(&adapter->core_mtx));

	adapter->hw.mac.get_link_status = TRUE;

	error = adapter->hw.mac.ops.check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, FALSE);
	ixv_update_link_status(adapter);

	return (error);
} /* ixv_check_link */