ixv.c revision 1.146 1 /*$NetBSD: ixv.c,v 1.146 2020/02/05 10:07:47 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #ifdef _KERNEL_OPT
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_net_mpsafe.h"
41 #endif
42
43 #include "ixgbe.h"
44 #include "vlan.h"
45
46 /************************************************************************
47 * Driver version
48 ************************************************************************/
49 static const char ixv_driver_version[] = "2.0.1-k";
50 /* XXX NetBSD: + 1.5.17 */
51
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *   Indexed by the "String Index" field of ixv_vendor_info_array.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static s32 ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static int ixv_set_rxfilter(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121 static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
122
123 static void ixv_setup_vlan_tagging(struct adapter *);
124 static int ixv_setup_vlan_support(struct adapter *);
125 static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
126 static int ixv_register_vlan(struct adapter *, u16);
127 static int ixv_unregister_vlan(struct adapter *, u16);
128
129 static void ixv_add_device_sysctls(struct adapter *);
130 static void ixv_save_stats(struct adapter *);
131 static void ixv_init_stats(struct adapter *);
132 static void ixv_update_stats(struct adapter *);
133 static void ixv_add_stats_sysctls(struct adapter *);
134 static void ixv_clear_evcnt(struct adapter *);
135
136 /* Sysctl handlers */
137 static void ixv_set_sysctl_value(struct adapter *, const char *,
138 const char *, int *, int);
139 static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
140 static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
141 static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
142 static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
143 static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
144 static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
145
146 /* The MSI-X Interrupt handlers */
147 static int ixv_msix_que(void *);
148 static int ixv_msix_mbx(void *);
149
150 /* Deferred interrupt tasklets */
151 static void ixv_handle_que(void *);
152 static void ixv_handle_link(void *);
153
154 /* Workqueue handler for deferred work */
155 static void ixv_handle_que_work(struct work *, void *);
156
157 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
158 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
159
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
/* NetBSD autoconf glue: registers probe/attach/detach for "ixv". */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0	/* FreeBSD driver registration, kept for reference only */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
177
/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * NetBSD: TUNABLE_INT is defined away to a no-op, so these "tunables"
 * are compile-time defaults only (FreeBSD heritage).
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Which packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * MP-safety flags for callouts/softints/workqueues.
 * NOTE(review): "SOFTINFT" looks like a typo for "SOFTINT", but the
 * macro name is presumably used elsewhere in the ixgbe code — confirm
 * all users before renaming.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0	/* FreeBSD if_transmit hooks, unused on NetBSD */
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
242
243 /************************************************************************
244 * ixv_probe - Device identification routine
245 *
246 * Determines if the driver should be loaded on
247 * adapter based on its PCI vendor/device ID.
248 *
249 * return BUS_PROBE_DEFAULT on success, positive on failure
250 ************************************************************************/
251 static int
252 ixv_probe(device_t dev, cfdata_t cf, void *aux)
253 {
254 #ifdef __HAVE_PCI_MSI_MSIX
255 const struct pci_attach_args *pa = aux;
256
257 return (ixv_lookup(pa) != NULL) ? 1 : 0;
258 #else
259 return 0;
260 #endif
261 } /* ixv_probe */
262
263 static const ixgbe_vendor_info_t *
264 ixv_lookup(const struct pci_attach_args *pa)
265 {
266 const ixgbe_vendor_info_t *ent;
267 pcireg_t subid;
268
269 INIT_DEBUGOUT("ixv_lookup: begin");
270
271 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
272 return NULL;
273
274 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
275
276 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
277 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
278 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
279 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
280 (ent->subvendor_id == 0)) &&
281 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
282 (ent->subdevice_id == 0))) {
283 return ent;
284 }
285 }
286
287 return NULL;
288 }
289
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure, frees whatever was allocated via err_late/err_out and
 *   leaves osdep.attached false so ixv_detach() becomes a no-op.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;	/* printable mailbox API version */
	const char *str;	/* printable MAC name */
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Prefer the 64-bit DMA tag when the bus supports it. */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Probe already matched this device; a miss here is a bug. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	/* Pretty-print whichever API version negotiation settled on. */
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN,MAX]; otherwise
	 * fall back to the compiled-in default.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report capable vs. enabled feature flags (verbose boot only). */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Only now is ixv_detach() allowed to tear us down. */
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
557
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if attach bailed out before completing. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* VLANs still configured and detach not forced: refuse. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	/* Tear down the per-queue deferred-work softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* ... and the optional workqueues (NULL when never created). */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered during attach. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* txr was advanced by the softint loop above; restart it. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload counters */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_queues(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
682
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the adapter core lock (asserted below).
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the adapter and stop the watchdog before reconfiguring. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Reset the per-queue interrupt-disable nesting counters. */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_rxfilter(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: mailbox vector plus every queue vector. */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
801
802 /************************************************************************
803 * ixv_enable_queue
804 ************************************************************************/
805 static inline void
806 ixv_enable_queue(struct adapter *adapter, u32 vector)
807 {
808 struct ixgbe_hw *hw = &adapter->hw;
809 struct ix_queue *que = &adapter->queues[vector];
810 u32 queue = 1UL << vector;
811 u32 mask;
812
813 mutex_enter(&que->dc_mtx);
814 if (que->disabled_count > 0 && --que->disabled_count > 0)
815 goto out;
816
817 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
818 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
819 out:
820 mutex_exit(&que->dc_mtx);
821 } /* ixv_enable_queue */
822
823 /************************************************************************
824 * ixv_disable_queue
825 ************************************************************************/
826 static inline void
827 ixv_disable_queue(struct adapter *adapter, u32 vector)
828 {
829 struct ixgbe_hw *hw = &adapter->hw;
830 struct ix_queue *que = &adapter->queues[vector];
831 u32 queue = 1UL << vector;
832 u32 mask;
833
834 mutex_enter(&que->dc_mtx);
835 if (que->disabled_count++ > 0)
836 goto out;
837
838 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
839 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
840 out:
841 mutex_exit(&que->dc_mtx);
842 } /* ixv_disable_queue */
843
844 #if 0
845 static inline void
846 ixv_rearm_queues(struct adapter *adapter, u64 queues)
847 {
848 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
849 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
850 } /* ixv_rearm_queues */
851 #endif
852
853
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler: masks the queue's vector, reaps
 *   completed TX descriptors, optionally recomputes the adaptive
 *   interrupt-moderation (AIM) setting, then either defers RX work
 *   to the queue softint or re-enables the vector.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Keep the vector masked until the deferred work has run. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet over the last interval (TX and RX). */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
944
/************************************************************************
 * ixv_msix_mbx - MSI-X Mailbox/Link Interrupt Service routine
 *
 *   Fires on PF->VF mailbox events (link status changes).  Defers the
 *   actual processing to the link softint and re-arms the mailbox
 *   vector before returning.
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable this vector in the interrupt mask set register. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
965
966 static void
967 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
968 {
969
970 /*
971 * Newer devices than 82598 have VF function, so this function is
972 * simple.
973 */
974 itr |= IXGBE_EITR_CNT_WDIS;
975
976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
977 }
978
979
980 /************************************************************************
981 * ixv_media_status - Media Ioctl callback
982 *
983 * Called whenever the user queries the status of
984 * the interface using ifconfig.
985 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it. */
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Map the negotiated speed onto the matching ifmedia subtype. */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	/* Derive if_baudrate from the media word just built. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_media_status */
1031
1032 /************************************************************************
1033 * ixv_media_change - Media Ioctl callback
1034 *
1035 * Called when the user changes speed/duplex using
1036 * media/mediopt option with ifconfig.
1037 ************************************************************************/
1038 static int
1039 ixv_media_change(struct ifnet *ifp)
1040 {
1041 struct adapter *adapter = ifp->if_softc;
1042 struct ifmedia *ifm = &adapter->media;
1043
1044 INIT_DEBUGOUT("ixv_media_change: begin");
1045
1046 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1047 return (EINVAL);
1048
1049 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1050 case IFM_AUTO:
1051 break;
1052 default:
1053 device_printf(adapter->dev, "Only auto media type\n");
1054 return (EINVAL);
1055 }
1056
1057 return (0);
1058 } /* ixv_media_change */
1059
1060 /************************************************************************
1061 * ixv_negotiate_api
1062 *
1063 * Negotiate the Mailbox API with the PF;
1064 * start with the most featured API first.
1065 ************************************************************************/
1066 static int
1067 ixv_negotiate_api(struct adapter *adapter)
1068 {
1069 struct ixgbe_hw *hw = &adapter->hw;
1070 int mbx_api[] = { ixgbe_mbox_api_13,
1071 ixgbe_mbox_api_12,
1072 ixgbe_mbox_api_11,
1073 ixgbe_mbox_api_10,
1074 ixgbe_mbox_api_unknown };
1075 int i = 0;
1076
1077 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1078 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1079 return (0);
1080 i++;
1081 }
1082
1083 return (EINVAL);
1084 } /* ixv_negotiate_api */
1085
1086
1087 /************************************************************************
 * ixv_set_rxfilter - Receive filter (promisc/allmulti/multicast) update
1089 *
1090 * Called whenever multicast address list is updated.
1091 ************************************************************************/
1092 static int
1093 ixv_set_rxfilter(struct adapter *adapter)
1094 {
1095 u8 mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
1096 struct ifnet *ifp = adapter->ifp;
1097 struct ixgbe_hw *hw = &adapter->hw;
1098 u8 *update_ptr;
1099 int mcnt = 0;
1100 struct ethercom *ec = &adapter->osdep.ec;
1101 struct ether_multi *enm;
1102 struct ether_multistep step;
1103 bool overflow = false;
1104 int error, rc = 0;
1105
1106 KASSERT(mutex_owned(&adapter->core_mtx));
1107 IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");
1108
1109 /* 1: For PROMISC */
1110 if (ifp->if_flags & IFF_PROMISC) {
1111 error = hw->mac.ops.update_xcast_mode(hw,
1112 IXGBEVF_XCAST_MODE_PROMISC);
1113 if (error == IXGBE_ERR_NOT_TRUSTED) {
1114 device_printf(adapter->dev,
1115 "this interface is not trusted\n");
1116 error = EPERM;
1117 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1118 device_printf(adapter->dev,
1119 "the PF doesn't support promisc mode\n");
1120 error = EOPNOTSUPP;
1121 } else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
1122 device_printf(adapter->dev,
1123 "the PF may not in promisc mode\n");
1124 error = EINVAL;
1125 } else if (error) {
1126 device_printf(adapter->dev,
1127 "failed to set promisc mode. error = %d\n",
1128 error);
1129 error = EIO;
1130 } else
1131 return 0;
1132 rc = error;
1133 }
1134
1135 /* 2: For ALLMULTI or normal */
1136 ETHER_LOCK(ec);
1137 ETHER_FIRST_MULTI(step, ec, enm);
1138 while (enm != NULL) {
1139 if ((mcnt >= IXGBE_MAX_VF_MC) ||
1140 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1141 ETHER_ADDR_LEN) != 0)) {
1142 overflow = true;
1143 break;
1144 }
1145 bcopy(enm->enm_addrlo,
1146 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1147 IXGBE_ETH_LENGTH_OF_ADDRESS);
1148 mcnt++;
1149 ETHER_NEXT_MULTI(step, enm);
1150 }
1151 ETHER_UNLOCK(ec);
1152
1153 /* 3: For ALLMULTI */
1154 if (overflow) {
1155 error = hw->mac.ops.update_xcast_mode(hw,
1156 IXGBEVF_XCAST_MODE_ALLMULTI);
1157 if (error == IXGBE_ERR_NOT_TRUSTED) {
1158 device_printf(adapter->dev,
1159 "this interface is not trusted\n");
1160 error = EPERM;
1161 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1162 device_printf(adapter->dev,
1163 "the PF doesn't support allmulti mode\n");
1164 error = EOPNOTSUPP;
1165 } else if (error) {
1166 device_printf(adapter->dev,
1167 "number of Ethernet multicast addresses "
1168 "exceeds the limit (%d). error = %d\n",
1169 IXGBE_MAX_VF_MC, error);
1170 error = ENOSPC;
1171 } else {
1172 ETHER_LOCK(ec);
1173 ec->ec_flags |= ETHER_F_ALLMULTI;
1174 ETHER_UNLOCK(ec);
1175 return rc; /* Promisc might failed */
1176 }
1177
1178 if (rc == 0)
1179 rc = error;
1180
1181 /* Continue to update the multicast table as many as we can */
1182 }
1183
1184 /* 4: For normal operation */
1185 error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
1186 if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
1187 /* Normal operation */
1188 ETHER_LOCK(ec);
1189 ec->ec_flags &= ~ETHER_F_ALLMULTI;
1190 ETHER_UNLOCK(ec);
1191 error = 0;
1192 } else if (error) {
1193 device_printf(adapter->dev,
1194 "failed to set Ethernet multicast address "
1195 "operation to normal. error = %d\n", error);
1196 }
1197
1198 update_ptr = mta;
1199
1200 error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
1201 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1202 if (rc == 0)
1203 rc = error;
1204
1205 return rc;
1206 } /* ixv_set_rxfilter */
1207
1208 /************************************************************************
1209 * ixv_mc_array_itr
1210 *
1211 * An iterator function needed by the multicast shared code.
1212 * It feeds the shared code routine the addresses in the
1213 * array of ixv_set_rxfilter() one by one.
1214 ************************************************************************/
1215 static u8 *
1216 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1217 {
1218 u8 *addr = *update_ptr;
1219 u8 *newptr;
1220
1221 *vmdq = 0;
1222
1223 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1224 *update_ptr = newptr;
1225
1226 return addr;
1227 } /* ixv_mc_array_itr */
1228
1229 /************************************************************************
1230 * ixv_local_timer - Timer routine
1231 *
1232 * Checks for link status, updates statistics,
1233 * and runs the watchdog check.
1234 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;	/* callout arg is the softc */

	/* Take the core lock and run the real timer body. */
	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1244
/*
 * Timer body, called with the core lock held: re-checks link, updates
 * statistics, and performs per-queue TX hang detection. Re-arms itself
 * via callout_reset unless all queues are hung, in which case the
 * adapter is reinitialized.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* If the link state changed, reinit; the init path re-arms us. */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Update some event counters: sum per-ring TX error counters
	 * into the adapter-wide evcnt totals. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		/* NOTE: 'queues' is only consumed by the #if 0 rearm
		 * code below; it is otherwise unused. */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Re-arm the timer for the next second. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log, count the event, and reset the adapter. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1343
1344 /************************************************************************
1345 * ixv_update_link_status - Update OS on link state
1346 *
1347 * Note: Only updates the OS on the cached link state.
1348 * The real check of the hardware only happens with
1349 * a link interrupt.
1350 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a transition to UP. */
		if (adapter->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				/* Pretty-print the cached link speed. */
				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			/* NOTE(review): here the stack is notified before
			 * link_active is updated, the opposite order of the
			 * UP branch above — confirm this is intentional. */
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */
1407
1408
1409 /************************************************************************
1410 * ixv_stop - Stop the hardware
1411 *
1412 * Disables all traffic on the adapter by issuing a
1413 * global reset on the MAC and deallocates TX/RX buffers.
1414 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	/* if_stop entry point: just wrap ixv_stop with the core lock. */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1424
1425 static void
1426 ixv_stop(void *arg)
1427 {
1428 struct ifnet *ifp;
1429 struct adapter *adapter = arg;
1430 struct ixgbe_hw *hw = &adapter->hw;
1431
1432 ifp = adapter->ifp;
1433
1434 KASSERT(mutex_owned(&adapter->core_mtx));
1435
1436 INIT_DEBUGOUT("ixv_stop: begin\n");
1437 ixv_disable_intr(adapter);
1438
1439 /* Tell the stack that the interface is no longer active */
1440 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1441
1442 hw->mac.ops.reset_hw(hw);
1443 adapter->hw.adapter_stopped = FALSE;
1444 hw->mac.ops.stop_adapter(hw);
1445 callout_stop(&adapter->timer);
1446
1447 /* reprogram the RAR[0] in case user changed it. */
1448 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1449
1450 return;
1451 } /* ixv_stop */
1452
1453
1454 /************************************************************************
1455 * ixv_allocate_pci_resources
1456 ************************************************************************/
/*
 * Map BAR0 (the VF register window) and enable PCI memory decoding.
 * Returns 0 on success, ENXIO if the BAR type is unexpected or the
 * mapping fails.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* Shared failure path for both info and map errors;
			 * mem_size == 0 marks "nothing to unmap" later. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1506
1507 /************************************************************************
1508 * ixv_free_pci_resources
1509 ************************************************************************/
1510 static void
1511 ixv_free_pci_resources(struct adapter * adapter)
1512 {
1513 struct ix_queue *que = adapter->queues;
1514 int rid;
1515
1516 /*
1517 * Release all msix queue resources:
1518 */
1519 for (int i = 0; i < adapter->num_queues; i++, que++) {
1520 if (que->res != NULL)
1521 pci_intr_disestablish(adapter->osdep.pc,
1522 adapter->osdep.ihs[i]);
1523 }
1524
1525
1526 /* Clean the Mailbox interrupt last */
1527 rid = adapter->vector;
1528
1529 if (adapter->osdep.ihs[rid] != NULL) {
1530 pci_intr_disestablish(adapter->osdep.pc,
1531 adapter->osdep.ihs[rid]);
1532 adapter->osdep.ihs[rid] = NULL;
1533 }
1534
1535 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1536 adapter->osdep.nintrs);
1537
1538 if (adapter->osdep.mem_size != 0) {
1539 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1540 adapter->osdep.mem_bus_space_handle,
1541 adapter->osdep.mem_size);
1542 }
1543
1544 return;
1545 } /* ixv_free_pci_resources */
1546
1547 /************************************************************************
1548 * ixv_setup_interface
1549 *
1550 * Setup networking device structure and register an interface.
1551 ************************************************************************/
/*
 * Fill in the ifnet/ethercom structures, register the interface with
 * the network stack, and declare checksum/TSO/VLAN capabilities.
 * Returns 0 on success or the error from if_initialize().
 */
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit unless legacy TX was requested. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Interface-level offloads are off by default. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    |  ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
1643
1644
1645 /************************************************************************
1646 * ixv_initialize_transmit_units - Enable transmit unit.
1647 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Program every TX ring; enabling the queue is done last. */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* Ring starts out with descriptor space available. */
		txr->txr_no_space = false;

		/* Set Ring parameters: base (split low/high) and length. */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1692
1693
1694 /************************************************************************
1695 * ixv_initialize_rss_mapping
1696 ************************************************************************/
/*
 * Program the VF RSS key, redirection table, and hash-type selection
 * (VFMRQC). Only called for X550-class VFs and newer.
 */
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table: 64 entries, round-robin over
	 * the configured queues unless software RSS dictates otherwise. */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Flush one 32-bit VFRETA register per four entries. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate software hash types to VFMRQC field bits; warn for
	 * the EX variants the VF hardware doesn't support. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1790
1791
1792 /************************************************************************
1793 * ixv_initialize_receive_units - Setup receive registers and features.
1794 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, psrtype;

	/* Pick the RX buffer size (in BSIZEPKT units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		IXGBE_WRITE_BARRIER(hw);
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* RSS registers only exist on X550-class VFs and newer. */
	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
1908
1909 /************************************************************************
1910 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1911 *
1912 * Retrieves the TDH value from the hardware
1913 ************************************************************************/
1914 static int
1915 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1916 {
1917 struct sysctlnode node = *rnode;
1918 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1919 uint32_t val;
1920
1921 if (!txr)
1922 return (0);
1923
1924 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1925 node.sysctl_data = &val;
1926 return sysctl_lookup(SYSCTLFN_CALL(&node));
1927 } /* ixv_sysctl_tdh_handler */
1928
1929 /************************************************************************
 * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1931 *
1932 * Retrieves the TDT value from the hardware
1933 ************************************************************************/
1934 static int
1935 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1936 {
1937 struct sysctlnode node = *rnode;
1938 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1939 uint32_t val;
1940
1941 if (!txr)
1942 return (0);
1943
1944 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1945 node.sysctl_data = &val;
1946 return sysctl_lookup(SYSCTLFN_CALL(&node));
1947 } /* ixv_sysctl_tdt_handler */
1948
1949 /************************************************************************
1950 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1951 * handler function
1952 *
1953 * Retrieves the next_to_check value
1954 ************************************************************************/
1955 static int
1956 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1957 {
1958 struct sysctlnode node = *rnode;
1959 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1960 uint32_t val;
1961
1962 if (!rxr)
1963 return (0);
1964
1965 val = rxr->next_to_check;
1966 node.sysctl_data = &val;
1967 return sysctl_lookup(SYSCTLFN_CALL(&node));
1968 } /* ixv_sysctl_next_to_check_handler */
1969
1970 /************************************************************************
1971 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1972 *
1973 * Retrieves the RDH value from the hardware
1974 ************************************************************************/
1975 static int
1976 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1977 {
1978 struct sysctlnode node = *rnode;
1979 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1980 uint32_t val;
1981
1982 if (!rxr)
1983 return (0);
1984
1985 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1986 node.sysctl_data = &val;
1987 return sysctl_lookup(SYSCTLFN_CALL(&node));
1988 } /* ixv_sysctl_rdh_handler */
1989
1990 /************************************************************************
1991 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1992 *
1993 * Retrieves the RDT value from the hardware
1994 ************************************************************************/
1995 static int
1996 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1997 {
1998 struct sysctlnode node = *rnode;
1999 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2000 uint32_t val;
2001
2002 if (!rxr)
2003 return (0);
2004
2005 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2006 node.sysctl_data = &val;
2007 return sysctl_lookup(SYSCTLFN_CALL(&node));
2008 } /* ixv_sysctl_rdt_handler */
2009
2010 static void
2011 ixv_setup_vlan_tagging(struct adapter *adapter)
2012 {
2013 struct ethercom *ec = &adapter->osdep.ec;
2014 struct ixgbe_hw *hw = &adapter->hw;
2015 struct rx_ring *rxr;
2016 u32 ctrl;
2017 int i;
2018 bool hwtagging;
2019
2020 /* Enable HW tagging only if any vlan is attached */
2021 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2022 && VLAN_ATTACHED(ec);
2023
2024 /* Enable the queues */
2025 for (i = 0; i < adapter->num_queues; i++) {
2026 rxr = &adapter->rx_rings[i];
2027 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2028 if (hwtagging)
2029 ctrl |= IXGBE_RXDCTL_VME;
2030 else
2031 ctrl &= ~IXGBE_RXDCTL_VME;
2032 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2033 /*
2034 * Let Rx path know that it needs to store VLAN tag
2035 * as part of extra mbuf info.
2036 */
2037 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2038 }
2039 } /* ixv_setup_vlan_tagging */
2040
2041 /************************************************************************
2042 * ixv_setup_vlan_support
2043 ************************************************************************/
static int
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vid, vfta, retry;
	struct vlanid_list *vlanidp;
	int rv, error = 0;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixv_setup_vlan_tagging(adapter);

	/* Nothing further to do unless at least one VLAN is configured. */
	if (!VLAN_ATTACHED(ec))
		return 0;

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids (one bit per configured VID) */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & ((u32)1 << j)) == 0)
				continue;
			vid = (i * 32) + j;

			/* Call the shared code mailbox routine */
			/* The PF may NAK the request; retry up to 5 times. */
			while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
			    FALSE)) != 0) {
				if (++retry > 5) {
					device_printf(adapter->dev,
					    "%s: max retry exceeded\n",
					    __func__);
					break;
				}
			}
			/* Record the failure but keep going for other VIDs. */
			if (rv != 0) {
				device_printf(adapter->dev,
				    "failed to set vlan %d\n", vid);
				error = EACCES;
			}
		}
	}
	return error;
} /* ixv_setup_vlan_support */
2123
2124 static int
2125 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2126 {
2127 struct ifnet *ifp = &ec->ec_if;
2128 struct adapter *adapter = ifp->if_softc;
2129 int rv;
2130
2131 if (set)
2132 rv = ixv_register_vlan(adapter, vid);
2133 else
2134 rv = ixv_unregister_vlan(adapter, vid);
2135
2136 if (rv != 0)
2137 return rv;
2138
2139 /*
2140 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2141 * or 0 to 1.
2142 */
2143 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2144 ixv_setup_vlan_tagging(adapter);
2145
2146 return rv;
2147 }
2148
2149 /************************************************************************
2150 * ixv_register_vlan
2151 *
2152 * Run via a vlan config EVENT, it enables us to use the
2153 * HW Filter table since we can get the vlan id. This just
2154 * creates the entry in the soft version of the VFTA, init
2155 * will repopulate the real table.
2156 ************************************************************************/
2157 static int
2158 ixv_register_vlan(struct adapter *adapter, u16 vtag)
2159 {
2160 struct ixgbe_hw *hw = &adapter->hw;
2161 u16 index, bit;
2162 int error;
2163
2164 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2165 return EINVAL;
2166 IXGBE_CORE_LOCK(adapter);
2167 index = (vtag >> 5) & 0x7F;
2168 bit = vtag & 0x1F;
2169 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2170 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2171 IXGBE_CORE_UNLOCK(adapter);
2172
2173 if (error != 0) {
2174 device_printf(adapter->dev, "failed to register vlan %hu\n",
2175 vtag);
2176 error = EACCES;
2177 }
2178 return error;
2179 } /* ixv_register_vlan */
2180
2181 /************************************************************************
2182 * ixv_unregister_vlan
2183 *
2184 * Run via a vlan unconfig EVENT, remove our entry
2185 * in the soft vfta.
2186 ************************************************************************/
2187 static int
2188 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2189 {
2190 struct ixgbe_hw *hw = &adapter->hw;
2191 u16 index, bit;
2192 int error;
2193
2194 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2195 return EINVAL;
2196
2197 IXGBE_CORE_LOCK(adapter);
2198 index = (vtag >> 5) & 0x7F;
2199 bit = vtag & 0x1F;
2200 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2201 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2202 IXGBE_CORE_UNLOCK(adapter);
2203
2204 if (error != 0) {
2205 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2206 vtag);
2207 error = EIO;
2208 }
2209 return error;
2210 } /* ixv_unregister_vlan */
2211
2212 /************************************************************************
2213 * ixv_enable_intr
2214 ************************************************************************/
2215 static void
2216 ixv_enable_intr(struct adapter *adapter)
2217 {
2218 struct ixgbe_hw *hw = &adapter->hw;
2219 struct ix_queue *que = adapter->queues;
2220 u32 mask;
2221 int i;
2222
2223 /* For VTEIAC */
2224 mask = (1 << adapter->vector);
2225 for (i = 0; i < adapter->num_queues; i++, que++)
2226 mask |= (1 << que->msix);
2227 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2228
2229 /* For VTEIMS */
2230 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2231 que = adapter->queues;
2232 for (i = 0; i < adapter->num_queues; i++, que++)
2233 ixv_enable_queue(adapter, que->msix);
2234
2235 IXGBE_WRITE_FLUSH(hw);
2236 } /* ixv_enable_intr */
2237
2238 /************************************************************************
2239 * ixv_disable_intr
2240 ************************************************************************/
2241 static void
2242 ixv_disable_intr(struct adapter *adapter)
2243 {
2244 struct ix_queue *que = adapter->queues;
2245
2246 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2247
2248 /* disable interrupts other than queues */
2249 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2250
2251 for (int i = 0; i < adapter->num_queues; i++, que++)
2252 ixv_disable_queue(adapter, que->msix);
2253
2254 IXGBE_WRITE_FLUSH(&adapter->hw);
2255 } /* ixv_disable_intr */
2256
2257 /************************************************************************
2258 * ixv_set_ivar
2259 *
2260 * Setup the correct IVAR register for a particular MSI-X interrupt
2261 * - entry is the register array entry
2262 * - vector is the MSI-X vector for this queue
2263 * - type is RX/TX/MISC
2264 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* The hardware requires the allocation-valid bit alongside the vector. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* The mailbox/other cause occupies the low byte of VTIVAR_MISC. */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
		/*
		 * Each VTIVAR register covers two queues; the byte for
		 * this entry sits at 16 * (entry & 1) + 8 * type, where
		 * type is 0 for RX and 1 for TX.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xffUL << index);
		ivar |= ((u32)vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
2286
2287 /************************************************************************
2288 * ixv_configure_ivars
2289 ************************************************************************/
2290 static void
2291 ixv_configure_ivars(struct adapter *adapter)
2292 {
2293 struct ix_queue *que = adapter->queues;
2294
2295 /* XXX We should sync EITR value calculation with ixgbe.c? */
2296
2297 for (int i = 0; i < adapter->num_queues; i++, que++) {
2298 /* First the RX queue entry */
2299 ixv_set_ivar(adapter, i, que->msix, 0);
2300 /* ... and the TX */
2301 ixv_set_ivar(adapter, i, que->msix, 1);
2302 /* Set an initial value in EITR */
2303 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2304 }
2305
2306 /* For the mailbox interrupt */
2307 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2308 } /* ixv_configure_ivars */
2309
2310
2311 /************************************************************************
2312 * ixv_save_stats
2313 *
2314 * The VF stats registers never have a truly virgin
2315 * starting point, so this routine tries to make an
2316 * artificial one, marking ground zero on attach as
2317 * it were.
2318 ************************************************************************/
2319 static void
2320 ixv_save_stats(struct adapter *adapter)
2321 {
2322 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2323
2324 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2325 stats->saved_reset_vfgprc +=
2326 stats->vfgprc.ev_count - stats->base_vfgprc;
2327 stats->saved_reset_vfgptc +=
2328 stats->vfgptc.ev_count - stats->base_vfgptc;
2329 stats->saved_reset_vfgorc +=
2330 stats->vfgorc.ev_count - stats->base_vfgorc;
2331 stats->saved_reset_vfgotc +=
2332 stats->vfgotc.ev_count - stats->base_vfgotc;
2333 stats->saved_reset_vfmprc +=
2334 stats->vfmprc.ev_count - stats->base_vfmprc;
2335 }
2336 } /* ixv_save_stats */
2337
2338 /************************************************************************
2339 * ixv_init_stats
2340 ************************************************************************/
2341 static void
2342 ixv_init_stats(struct adapter *adapter)
2343 {
2344 struct ixgbe_hw *hw = &adapter->hw;
2345
2346 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2347 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2348 adapter->stats.vf.last_vfgorc |=
2349 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2350
2351 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2352 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2353 adapter->stats.vf.last_vfgotc |=
2354 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2355
2356 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2357
2358 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2359 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2360 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2361 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2362 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2363 } /* ixv_init_stats */
2364
/*
 * UPDATE_STAT_32 - fold a free-running 32 bit hardware counter into a
 * 64 bit evcnt.  A wrap is detected by current < last, carrying one
 * into the upper 32 bits.  Wrapped in do { } while (0) so the macro
 * acts as a single statement even in unbraced if/else bodies (the old
 * bare-brace form would break there).
 */
#define UPDATE_STAT_32(reg, last, count)			\
do {								\
	u32 current = IXGBE_READ_REG(hw, (reg));		\
	if (current < (last))					\
		(count).ev_count += 0x100000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFFF00000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)

/*
 * UPDATE_STAT_36 - same idea for a 36 bit counter whose low 32 bits
 * live in "lsb" and whose top 4 bits live in "msb".
 */
#define UPDATE_STAT_36(lsb, msb, last, count)			\
do {								\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));		\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));		\
	u64 current = ((cur_msb << 32) | cur_lsb);		\
	if (current < (last))					\
		(count).ev_count += 0x1000000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFF000000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)
2386
2387 /************************************************************************
2388 * ixv_update_stats - Update the board statistics counters.
2389 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32 bit packet counters: good rx, good tx, multicast rx */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36 bit octet counters are split across LSB/MSB register pairs */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* VF doesn't count errors by hardware */

} /* ixv_update_stats */
2407
2408 /************************************************************************
2409 * ixv_sysctl_interrupt_rate_handler
2410 ************************************************************************/
2411 static int
2412 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2413 {
2414 struct sysctlnode node = *rnode;
2415 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2416 struct adapter *adapter = que->adapter;
2417 uint32_t reg, usec, rate;
2418 int error;
2419
2420 if (que == NULL)
2421 return 0;
2422 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2423 usec = ((reg & 0x0FF8) >> 3);
2424 if (usec > 0)
2425 rate = 500000 / usec;
2426 else
2427 rate = 0;
2428 node.sysctl_data = &rate;
2429 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2430 if (error || newp == NULL)
2431 return error;
2432 reg &= ~0xfff; /* default, no limitation */
2433 if (rate > 0 && rate < 500000) {
2434 if (rate < 1000)
2435 rate = 1000;
2436 reg |= ((4000000/rate) & 0xff8);
2437 /*
2438 * When RSC is used, ITR interval must be larger than
2439 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2440 * The minimum value is always greater than 2us on 100M
2441 * (and 10M?(not documented)), but it's not on 1G and higher.
2442 */
2443 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2444 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2445 if ((adapter->num_queues > 1)
2446 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2447 return EINVAL;
2448 }
2449 ixv_max_interrupt_rate = rate;
2450 } else
2451 ixv_max_interrupt_rate = 0;
2452 ixv_eitr_write(adapter, que->msix, reg);
2453
2454 return (0);
2455 } /* ixv_sysctl_interrupt_rate_handler */
2456
2457 const struct sysctlnode *
2458 ixv_sysctl_instance(struct adapter *adapter)
2459 {
2460 const char *dvname;
2461 struct sysctllog **log;
2462 int rc;
2463 const struct sysctlnode *rnode;
2464
2465 log = &adapter->sysctllog;
2466 dvname = device_xname(adapter->dev);
2467
2468 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2469 0, CTLTYPE_NODE, dvname,
2470 SYSCTL_DESCR("ixv information and settings"),
2471 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2472 goto err;
2473
2474 return rnode;
2475 err:
2476 device_printf(adapter->dev,
2477 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2478 return NULL;
2479 }
2480
2481 static void
2482 ixv_add_device_sysctls(struct adapter *adapter)
2483 {
2484 struct sysctllog **log;
2485 const struct sysctlnode *rnode, *cnode;
2486 device_t dev;
2487
2488 dev = adapter->dev;
2489 log = &adapter->sysctllog;
2490
2491 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2492 aprint_error_dev(dev, "could not create sysctl root\n");
2493 return;
2494 }
2495
2496 if (sysctl_createv(log, 0, &rnode, &cnode,
2497 CTLFLAG_READWRITE, CTLTYPE_INT,
2498 "debug", SYSCTL_DESCR("Debug Info"),
2499 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2500 aprint_error_dev(dev, "could not create sysctl\n");
2501
2502 if (sysctl_createv(log, 0, &rnode, &cnode,
2503 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2504 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2505 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2506 aprint_error_dev(dev, "could not create sysctl\n");
2507
2508 if (sysctl_createv(log, 0, &rnode, &cnode,
2509 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2510 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2511 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2512 aprint_error_dev(dev, "could not create sysctl\n");
2513 }
2514
2515 /************************************************************************
2516 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2517 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/*
	 * Per-queue sysctl nodes and event counters.  A sysctl_createv
	 * failure breaks out of the loop, leaving later queues without
	 * sysctl nodes (their evcnts are simply not attached here).
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2699
2700 static void
2701 ixv_clear_evcnt(struct adapter *adapter)
2702 {
2703 struct tx_ring *txr = adapter->tx_rings;
2704 struct rx_ring *rxr = adapter->rx_rings;
2705 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2706 struct ixgbe_hw *hw = &adapter->hw;
2707 int i;
2708
2709 /* Driver Statistics */
2710 adapter->efbig_tx_dma_setup.ev_count = 0;
2711 adapter->mbuf_defrag_failed.ev_count = 0;
2712 adapter->efbig2_tx_dma_setup.ev_count = 0;
2713 adapter->einval_tx_dma_setup.ev_count = 0;
2714 adapter->other_tx_dma_setup.ev_count = 0;
2715 adapter->eagain_tx_dma_setup.ev_count = 0;
2716 adapter->enomem_tx_dma_setup.ev_count = 0;
2717 adapter->watchdog_events.ev_count = 0;
2718 adapter->tso_err.ev_count = 0;
2719 adapter->link_irq.ev_count = 0;
2720
2721 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2722 adapter->queues[i].irqs.ev_count = 0;
2723 adapter->queues[i].handleq.ev_count = 0;
2724 adapter->queues[i].req.ev_count = 0;
2725 txr->tso_tx.ev_count = 0;
2726 txr->no_desc_avail.ev_count = 0;
2727 txr->total_packets.ev_count = 0;
2728 #ifndef IXGBE_LEGACY_TX
2729 txr->pcq_drops.ev_count = 0;
2730 #endif
2731 txr->q_efbig_tx_dma_setup = 0;
2732 txr->q_mbuf_defrag_failed = 0;
2733 txr->q_efbig2_tx_dma_setup = 0;
2734 txr->q_einval_tx_dma_setup = 0;
2735 txr->q_other_tx_dma_setup = 0;
2736 txr->q_eagain_tx_dma_setup = 0;
2737 txr->q_enomem_tx_dma_setup = 0;
2738 txr->q_tso_err = 0;
2739
2740 rxr->rx_packets.ev_count = 0;
2741 rxr->rx_bytes.ev_count = 0;
2742 rxr->rx_copies.ev_count = 0;
2743 rxr->no_jmbuf.ev_count = 0;
2744 rxr->rx_discarded.ev_count = 0;
2745 }
2746
2747 /* MAC stats get their own sub node */
2748
2749 stats->ipcs.ev_count = 0;
2750 stats->l4cs.ev_count = 0;
2751 stats->ipcs_bad.ev_count = 0;
2752 stats->l4cs_bad.ev_count = 0;
2753
2754 /* Packet Reception Stats */
2755 stats->vfgprc.ev_count = 0;
2756 stats->vfgorc.ev_count = 0;
2757 stats->vfmprc.ev_count = 0;
2758 stats->vfgptc.ev_count = 0;
2759 stats->vfgotc.ev_count = 0;
2760
2761 /* Mailbox Stats */
2762 hw->mbx.stats.msgs_tx.ev_count = 0;
2763 hw->mbx.stats.msgs_rx.ev_count = 0;
2764 hw->mbx.stats.acks.ev_count = 0;
2765 hw->mbx.stats.reqs.ev_count = 0;
2766 hw->mbx.stats.rsts.ev_count = 0;
2767
2768 } /* ixv_clear_evcnt */
2769
2770 /************************************************************************
2771 * ixv_set_sysctl_value
2772 ************************************************************************/
2773 static void
2774 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2775 const char *description, int *limit, int value)
2776 {
2777 device_t dev = adapter->dev;
2778 struct sysctllog **log;
2779 const struct sysctlnode *rnode, *cnode;
2780
2781 log = &adapter->sysctllog;
2782 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2783 aprint_error_dev(dev, "could not create sysctl root\n");
2784 return;
2785 }
2786 if (sysctl_createv(log, 0, &rnode, &cnode,
2787 CTLFLAG_READWRITE, CTLTYPE_INT,
2788 name, SYSCTL_DESCR(description),
2789 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2790 aprint_error_dev(dev, "could not create sysctl\n");
2791 *limit = value;
2792 } /* ixv_set_sysctl_value */
2793
2794 /************************************************************************
2795 * ixv_print_debug_info
2796 *
 * Called only when the "debug" sysctl is written with a value of 1.
2798 * Provides a way to take a look at important statistics
2799 * maintained by the driver and hardware.
2800 ************************************************************************/
2801 static void
2802 ixv_print_debug_info(struct adapter *adapter)
2803 {
2804 device_t dev = adapter->dev;
2805 struct ix_queue *que = adapter->queues;
2806 struct rx_ring *rxr;
2807 struct tx_ring *txr;
2808 #ifdef LRO
2809 struct lro_ctrl *lro;
2810 #endif /* LRO */
2811
2812 for (int i = 0; i < adapter->num_queues; i++, que++) {
2813 txr = que->txr;
2814 rxr = que->rxr;
2815 #ifdef LRO
2816 lro = &rxr->lro;
2817 #endif /* LRO */
2818 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2819 que->msix, (long)que->irqs.ev_count);
2820 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2821 rxr->me, (long long)rxr->rx_packets.ev_count);
2822 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2823 rxr->me, (long)rxr->rx_bytes.ev_count);
2824 #ifdef LRO
2825 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2826 rxr->me, (uintmax_t)lro->lro_queued);
2827 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2828 rxr->me, (uintmax_t)lro->lro_flushed);
2829 #endif /* LRO */
2830 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2831 txr->me, (long)txr->total_packets.ev_count);
2832 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2833 txr->me, (long)txr->no_desc_avail.ev_count);
2834 }
2835
2836 device_printf(dev, "MBX IRQ Handled: %lu\n",
2837 (long)adapter->link_irq.ev_count);
2838 } /* ixv_print_debug_info */
2839
2840 /************************************************************************
2841 * ixv_sysctl_debug
2842 ************************************************************************/
2843 static int
2844 ixv_sysctl_debug(SYSCTLFN_ARGS)
2845 {
2846 struct sysctlnode node = *rnode;
2847 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2848 int error, result;
2849
2850 node.sysctl_data = &result;
2851 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2852
2853 if (error || newp == NULL)
2854 return error;
2855
2856 if (result == 1)
2857 ixv_print_debug_info(adapter);
2858
2859 return 0;
2860 } /* ixv_sysctl_debug */
2861
2862 /************************************************************************
2863 * ixv_init_device_features
2864 ************************************************************************/
2865 static void
2866 ixv_init_device_features(struct adapter *adapter)
2867 {
2868 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2869 | IXGBE_FEATURE_VF
2870 | IXGBE_FEATURE_RSS
2871 | IXGBE_FEATURE_LEGACY_TX;
2872
2873 /* A tad short on feature flags for VFs, atm. */
2874 switch (adapter->hw.mac.type) {
2875 case ixgbe_mac_82599_vf:
2876 break;
2877 case ixgbe_mac_X540_vf:
2878 break;
2879 case ixgbe_mac_X550_vf:
2880 case ixgbe_mac_X550EM_x_vf:
2881 case ixgbe_mac_X550EM_a_vf:
2882 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2883 break;
2884 default:
2885 break;
2886 }
2887
2888 /* Enabled by default... */
2889 /* Is a virtual function (VF) */
2890 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2891 adapter->feat_en |= IXGBE_FEATURE_VF;
2892 /* Netmap */
2893 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2894 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2895 /* Receive-Side Scaling (RSS) */
2896 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2897 adapter->feat_en |= IXGBE_FEATURE_RSS;
2898 /* Needs advanced context descriptor regardless of offloads req'd */
2899 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2900 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2901
2902 /* Enabled via sysctl... */
2903 /* Legacy (single queue) transmit */
2904 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2905 ixv_enable_legacy_tx)
2906 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2907 } /* ixv_init_device_features */
2908
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 *   Stops the adapter under the core lock.  Currently compiled out;
 *   NetBSD would hook this via pmf(9) instead of a shutdown method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2924
/************************************************************************
 * ixv_ifflags_cb - Callback on changes to if_flags / ec_capenable
 *
 *   Returns 0 on success, ENETRESET when the change requires a full
 *   reinitialization, or the error from ixv_set_rxfilter().
 ************************************************************************/
static int
ixv_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short saved_flags;	/* previous if_flags, kept for rollback */
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Latch the new if_flags, remembering the old value */
	saved_flags = adapter->if_flags;
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/* Changes other than those handled below force a reinit */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0) {
		rv = ixv_set_rxfilter(adapter);
		if (rv != 0) {
			/* Restore previous */
			adapter->if_flags = saved_flags;
			goto out;
		}
	}

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		rv = ixv_setup_vlan_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
2976
2977
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	int error;
	int l4csum_en;
	/* Layer-4 Rx checksum flags that must be toggled as one unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* First pass: command-specific validation and debug logging */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI: {
		struct ether_multi *enm;
		struct ether_multistep step;
		struct ethercom *ec = &adapter->osdep.ec;
		bool overflow = false;
		int mcnt = 0;

		/*
		 * Check the number of multicast address. If it exceeds,
		 * return ENOSPC.
		 * Update this code when we support API 1.3.
		 */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			mcnt++;

			/*
			 * This code is before adding, so one room is required
			 * at least.
			 */
			if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
				overflow = true;
				break;
			}
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
		error = 0;
		/* Filter is full: ask the PF to switch us to allmulti */
		if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
			error = hw->mac.ops.update_xcast_mode(hw,
			    IXGBEVF_XCAST_MODE_ALLMULTI);
			if (error == IXGBE_ERR_NOT_TRUSTED) {
				device_printf(adapter->dev,
				    "this interface is not trusted\n");
				error = EPERM;
			} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
				device_printf(adapter->dev,
				    "the PF doesn't support allmulti mode\n");
				error = EOPNOTSUPP;
			} else if (error) {
				device_printf(adapter->dev,
				    "number of Ethernet multicast addresses "
				    "exceeds the limit (%d). error = %d\n",
				    IXGBE_MAX_VF_MC, error);
				error = ENOSPC;
			} else
				ec->ec_flags |= ETHER_F_ALLMULTI;
		}
		if (error)
			return error;
	}
	/*FALLTHROUGH*/
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Fold current hardware counters in before clearing */
		ixv_update_stats(adapter);
		ixv_clear_evcnt(adapter);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second pass: hand off to ether_ioctl(), then apply the change */
	switch (command) {
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* ENETRESET means the change needs to be applied below */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_rxfilter(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
3112
/************************************************************************
 * ixv_init - if_init entry point
 *
 *   Locked wrapper around ixv_init_locked().  Always returns 0.
 ************************************************************************/
static int
ixv_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return 0;
} /* ixv_init */
3127
/************************************************************************
 * ixv_handle_que - Deferred per-queue service routine
 *
 *   Runs in softint or workqueue context.  Drains completed Rx and Tx
 *   work for one queue and restarts pending transmission.  If more
 *   work remains it reschedules itself and leaves the queue interrupt
 *   masked; otherwise it re-enables the interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue path: restart Tx if packets are waiting */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			/* Interrupt stays masked until the queue is drained */
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
3175
/************************************************************************
 * ixv_handle_que_work - Workqueue wrapper around ixv_handle_que()
 ************************************************************************/
static void
ixv_handle_que_work(struct work *wk, void *context)
{
	/* Recover the owning queue from its embedded work item */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here the same as ixg(4).
	 * See ixgbe_msix_que().
	 */
	ixv_handle_que(que);
}
3190
/************************************************************************
 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates num_queues + 1 vectors (one per queue pair plus one for
 *   the PF mailbox), establishes handlers, binds them round-robin to
 *   CPUs, and creates the softint/workqueue deferral contexts.
 *   Returns 0 on success or ENXIO on failure.
 ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, msix_ctrl, rid, vector = 0;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	char intr_xname[32];
	const char *intrstr = NULL;
	kcpuset_t *affinity;
	int cpu_id = 0;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* One vector per queue pair, plus one for the mailbox */
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			pci_intr_release(pc, adapter->osdep.intrs,
			    adapter->osdep.nintrs);
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			kcpuset_destroy(affinity);
			return (ENXIO);
		}
		que->msix = vector;
		/* NOTE(review): "1 << que->msix" is an int shift; safe only
		 * while the VF vector count stays below 32 — confirm. */
		adapter->active_queues |= (u64)(1 << que->msix);

		cpu_id = i;
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0)
			aprint_normal(", bound queue %d to cpu %d\n",
			    i, cpu_id % ncpu);
		else
			aprint_normal("\n");

#ifndef IXGBE_LEGACY_TX
		txr->txr_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);
#endif
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixv_handle_que, que);
		if (que->que_si == NULL) {
			/* Non-fatal here: only a diagnostic is emitted */
			aprint_error_dev(dev,
			    "could not establish software interrupt\n");
		}
	}
	/* Workqueue for deferred multiqueue transmit starts */
	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
	error = workqueue_create(&adapter->txr_wq, wqname,
	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
	}
	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));

	/* Workqueue alternative to que_si (see txrx_use_workqueue) */
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "couldn't create workqueue\n");
	}

	/* and Mailbox */
	cpu_id++;	/* place the mailbox vector on the next CPU */
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	adapter->vector = vector;	/* mailbox uses the last vector */
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the mbx handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		/* NOTE(review): queue handlers/softints established above are
		 * not torn down on this path — verify callers recover. */
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		kcpuset_destroy(affinity);
		return (ENXIO);
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector],
	    affinity, NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	/* Tasklets for Mailbox */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixv_handle_link, adapter);
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	kcpuset_destroy(affinity);
	return (0);
} /* ixv_allocate_msix */
3344
3345 /************************************************************************
3346 * ixv_configure_interrupts - Setup MSI-X resources
3347 *
3348 * Note: The VF device MUST use MSI-X, there is no fallback.
3349 ************************************************************************/
3350 static int
3351 ixv_configure_interrupts(struct adapter *adapter)
3352 {
3353 device_t dev = adapter->dev;
3354 int want, queues, msgs;
3355
3356 /* Must have at least 2 MSI-X vectors */
3357 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3358 if (msgs < 2) {
3359 aprint_error_dev(dev, "MSIX config error\n");
3360 return (ENXIO);
3361 }
3362 msgs = MIN(msgs, IXG_MAX_NINTR);
3363
3364 /* Figure out a reasonable auto config value */
3365 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3366
3367 if (ixv_num_queues != 0)
3368 queues = ixv_num_queues;
3369 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3370 queues = IXGBE_VF_MAX_TX_QUEUES;
3371
3372 /*
3373 * Want vectors for the queues,
3374 * plus an additional for mailbox.
3375 */
3376 want = queues + 1;
3377 if (msgs >= want)
3378 msgs = want;
3379 else {
3380 aprint_error_dev(dev,
3381 "MSI-X Configuration Problem, "
3382 "%d vectors but %d queues wanted!\n",
3383 msgs, want);
3384 return -1;
3385 }
3386
3387 adapter->msix_mem = (void *)1; /* XXX */
3388 aprint_normal_dev(dev,
3389 "Using MSI-X interrupts with %d vectors\n", msgs);
3390 adapter->num_queues = queues;
3391
3392 return (0);
3393 } /* ixv_configure_interrupts */
3394
3395
/************************************************************************
 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
 *
 * Done outside of interrupt context since the driver might sleep
 ************************************************************************/
static void
ixv_handle_link(void *context)
{
	struct adapter *adapter = context;

	IXGBE_CORE_LOCK(adapter);

	/* Query link state from the PF and propagate it to the stack */
	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);
	ixv_update_link_status(adapter);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_link */
3414
/************************************************************************
 * ixv_check_link - Used in the local timer to poll for link changes
 *
 *   Caller must hold the core lock (asserted below).  Returns the
 *   status code from the mac check_link operation.
 ************************************************************************/
static s32
ixv_check_link(struct adapter *adapter)
{
	s32 error;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Force a fresh query instead of relying on cached link state */
	adapter->hw.mac.get_link_status = TRUE;

	error = adapter->hw.mac.ops.check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, FALSE);
	ixv_update_link_status(adapter);

	return error;
} /* ixv_check_link */
3433