ixv.c revision 1.56 1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
34 /*$NetBSD: ixv.c,v 1.56 2017/05/26 09:17:32 msaitoh Exp $*/
35
36 #ifdef _KERNEL_OPT
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_net_mpsafe.h"
40 #endif
41
42 #include "ixgbe.h"
43 #include "vlan.h"
44
45 /*********************************************************************
46 * Driver version
47 *********************************************************************/
48 char ixv_driver_version[] = "1.4.6-k";
49
50 /*********************************************************************
51 * PCI Device ID Table
52 *
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixv_strings
55 * Last entry must be all 0s
56 *
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 *********************************************************************/
59
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* A subvendor/subdevice ID of 0 acts as a wildcard in ixv_lookup(). */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
69
70 /*********************************************************************
71 * Table of branding strings
72 *********************************************************************/
73
74 static const char *ixv_strings[] = {
75 "Intel(R) PRO/10GbE Virtual Function Network Driver"
76 };
77
78 /*********************************************************************
79 * Function prototypes
80 *********************************************************************/
81 static int ixv_probe(device_t, cfdata_t, void *);
82 static void ixv_attach(device_t, device_t, void *);
83 static int ixv_detach(device_t, int);
84 #if 0
85 static int ixv_shutdown(device_t);
86 #endif
87 static int ixv_ioctl(struct ifnet *, u_long, void *);
88 static int ixv_init(struct ifnet *);
89 static void ixv_init_locked(struct adapter *);
90 static void ixv_ifstop(struct ifnet *, int);
91 static void ixv_stop(void *);
92 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
93 static int ixv_media_change(struct ifnet *);
94 static void ixv_identify_hardware(struct adapter *);
95 static int ixv_allocate_pci_resources(struct adapter *,
96 const struct pci_attach_args *);
97 static int ixv_allocate_msix(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_setup_msix(struct adapter *);
100 static void ixv_free_pci_resources(struct adapter *);
101 static void ixv_local_timer(void *);
102 static void ixv_local_timer_locked(void *);
103 static void ixv_setup_interface(device_t, struct adapter *);
104 static void ixv_config_link(struct adapter *);
105
106 static void ixv_initialize_transmit_units(struct adapter *);
107 static void ixv_initialize_receive_units(struct adapter *);
108
109 static void ixv_enable_intr(struct adapter *);
110 static void ixv_disable_intr(struct adapter *);
111 static void ixv_set_multi(struct adapter *);
112 static void ixv_update_link_status(struct adapter *);
113 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
114 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
115 static void ixv_configure_ivars(struct adapter *);
116 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
117
118 static void ixv_setup_vlan_support(struct adapter *);
119 #if 0
120 static void ixv_register_vlan(void *, struct ifnet *, u16);
121 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
122 #endif
123
124 static void ixv_add_device_sysctls(struct adapter *);
125 static void ixv_save_stats(struct adapter *);
126 static void ixv_init_stats(struct adapter *);
127 static void ixv_update_stats(struct adapter *);
128 static void ixv_add_stats_sysctls(struct adapter *);
129 static void ixv_set_sysctl_value(struct adapter *, const char *,
130 const char *, int *, int);
131
132 /* The MSI/X Interrupt handlers */
133 static int ixv_msix_que(void *);
134 static int ixv_msix_mbx(void *);
135
136 /* Deferred interrupt tasklets */
137 static void ixv_handle_que(void *);
138 static void ixv_handle_mbx(void *);
139
140 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
141 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
142
143 #ifdef DEV_NETMAP
144 /*
145 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
146 * if_ix.c.
147 */
148 extern void ixgbe_netmap_attach(struct adapter *adapter);
149
150 #include <net/netmap.h>
151 #include <sys/selinfo.h>
152 #include <dev/netmap/netmap_kern.h>
153 #endif /* DEV_NETMAP */
154
155 /*********************************************************************
156 * FreeBSD Device Interface Entry Points
157 *********************************************************************/
158
159 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
160 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
161 DVF_DETACH_SHUTDOWN);
162
163 # if 0
164 static device_method_t ixv_methods[] = {
165 /* Device interface */
166 DEVMETHOD(device_probe, ixv_probe),
167 DEVMETHOD(device_attach, ixv_attach),
168 DEVMETHOD(device_detach, ixv_detach),
169 DEVMETHOD(device_shutdown, ixv_shutdown),
170 DEVMETHOD_END
171 };
172 #endif
173
174 #if 0
175 static driver_t ixv_driver = {
176 "ixv", ixv_methods, sizeof(struct adapter),
177 };
178
179 devclass_t ixv_devclass;
180 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
181 MODULE_DEPEND(ixv, pci, 1, 1, 1);
182 MODULE_DEPEND(ixv, ether, 1, 1, 1);
183 #ifdef DEV_NETMAP
184 MODULE_DEPEND(ix, netmap, 1, 1, 1);
185 #endif /* DEV_NETMAP */
186 /* XXX depend on 'ix' ? */
187 #endif
188
189 /*
190 ** TUNEABLE PARAMETERS:
191 */
192
/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 0;
/*
 * NetBSD has no FreeBSD-style loader tunables; define TUNABLE_INT()
 * as a no-op so the FreeBSD-derived registrations below compile away.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/*
 * With NET_MPSAFE the callout and softints run without the kernel lock;
 * otherwise fall back to the (kernel-locked) defaults.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#endif
242
243 /*********************************************************************
244 * Device identification routine
245 *
246 * ixv_probe determines if the driver should be loaded on
247 * adapter based on PCI vendor/device id of the adapter.
248 *
249 * return 1 on success, 0 on failure
250 *********************************************************************/
251
252 static int
253 ixv_probe(device_t dev, cfdata_t cf, void *aux)
254 {
255 #ifdef __HAVE_PCI_MSI_MSIX
256 const struct pci_attach_args *pa = aux;
257
258 return (ixv_lookup(pa) != NULL) ? 1 : 0;
259 #else
260 return 0;
261 #endif
262 }
263
264 static ixgbe_vendor_info_t *
265 ixv_lookup(const struct pci_attach_args *pa)
266 {
267 pcireg_t subid;
268 ixgbe_vendor_info_t *ent;
269
270 INIT_DEBUGOUT("ixv_lookup: begin");
271
272 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
273 return NULL;
274
275 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
276
277 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
278 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
279 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
280
281 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
282 (ent->subvendor_id == 0)) &&
283
284 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
285 (ent->subdevice_id == 0))) {
286 return ent;
287 }
288 }
289 return NULL;
290 }
291
292
293 /*********************************************************************
294 * Device initialization routine
295 *
296 * The attach entry point is called when the driver is being loaded.
297 * This routine identifies the type of hardware, allocates all resources
298 * and initializes the hardware.
299 *
300 * return 0 on success, positive on failure
301 *********************************************************************/
302
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;
#endif

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	/* attached stays false until the end so ixv_detach() can bail early. */
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot fail: ixv_probe() already matched this device. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN,MAX]; otherwise
	 * fall back to the compile-time default.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: its
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset mbox api to 1.0 */
	error = ixgbe_reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
	if (error) {
		error = EIO;
		goto err_late;
	}

	/* Negotiate mailbox API version */
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		/* Non-fatal: continue with the older mailbox API. */
		aprint_debug_dev(dev,
		    "MBX API 1.1 negotiation failed! Error %d\n", error);

	error = ixgbe_init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_fast64();

		memcpy(addr, &rndval, sizeof(addr));
		/* Clear the multicast bit, set the locally-administered bit. */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Add sysctls */
	ixv_add_device_sysctls(adapter);
	ixv_add_stats_sysctls(adapter);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attach complete so ixv_detach() knows there is work to undo. */
	adapter->osdep.attached = true;
	return;

	/* err_late: queues were allocated; free ring structures too. */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	/* err_out: only PCI resources may be mapped at this point. */
err_out:
	ixv_free_pci_resources(adapter);
	return;

}
478
479 /*********************************************************************
480 * Device removal routine
481 *
482 * The detach entry point is called when the driver is being removed.
483 * This routine stops the adapter and deallocates all the resources
484 * that were allocated for driver operation.
485 *
486 * return 0 on success, positive on failure
487 *********************************************************************/
488
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if ixv_attach() bailed out early. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANS are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse to detach while VLAN interfaces are stacked on us. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
#ifndef IXGBE_LEGACY_TX
		softint_disestablish(txr->txr_si);
#endif
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for the watchdog callout to finish before freeing resources. */
	callout_halt(&adapter->timer, NULL);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/* Detach the adapter-wide event counters (see ixv_add_stats_sysctls). */
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue counters: txr was advanced above, so rewind it first. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}

	/* Checksum offload statistics */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
601
602 /*********************************************************************
603 *
604 * Shutdown entry point
605 *
606 **********************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* FreeBSD-style shutdown hook: stop the adapter under the core lock. */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif
618
619 static int
620 ixv_ifflags_cb(struct ethercom *ec)
621 {
622 struct ifnet *ifp = &ec->ec_if;
623 struct adapter *adapter = ifp->if_softc;
624 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
625
626 IXGBE_CORE_LOCK(adapter);
627
628 if (change != 0)
629 adapter->if_flags = ifp->if_flags;
630
631 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
632 rc = ENETRESET;
633
634 IXGBE_CORE_UNLOCK(adapter);
635
636 return rc;
637 }
638
639 /*********************************************************************
640 * Ioctl entry point
641 *
642 * ixv_ioctl is called when the user wants to configure the
643 * interface.
644 *
645 * return 0 on success, positive on failure
646 **********************************************************************/
647
static int
ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	int l4csum_en;
	/* All four L4 Rx checksum capabilities, toggled as one unit below. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	    IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* This first switch only emits debug traces; no dispatching here. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Actual dispatch. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common Ethernet code do the generic work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: reprogram hardware only if already running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* not running: new settings apply on next init */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}
721
722 /*********************************************************************
723 * Init entry point
724 *
725 * This routine is used in two ways. It is used by the stack as
726 * init entry point in network interface structure. It is also used
727 * by the driver as a hw/sw initialization routine to get to a
728 * consistent state.
729 *
730 * return 0 on success, positive on failure
731 **********************************************************************/
732 #define IXGBE_MHADD_MFS_SHIFT 16
733
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	/* Caller must hold the core lock (see ixv_init()). */
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	ixgbe_reset_hw(hw);
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		/* Non-fatal: continue with the older mailbox API. */
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);
	hw->mac.get_link_status = TRUE;

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
838
839 static int
840 ixv_init(struct ifnet *ifp)
841 {
842 struct adapter *adapter = ifp->if_softc;
843
844 IXGBE_CORE_LOCK(adapter);
845 ixv_init_locked(adapter);
846 IXGBE_CORE_UNLOCK(adapter);
847 return 0;
848 }
849
850
851 /*
852 **
853 ** MSIX Interrupt Handlers and Tasklets
854 **
855 */
856
857 static inline void
858 ixv_enable_queue(struct adapter *adapter, u32 vector)
859 {
860 struct ixgbe_hw *hw = &adapter->hw;
861 u32 queue = 1 << vector;
862 u32 mask;
863
864 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
865 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
866 }
867
868 static inline void
869 ixv_disable_queue(struct adapter *adapter, u32 vector)
870 {
871 struct ixgbe_hw *hw = &adapter->hw;
872 u64 queue = (u64)(1 << vector);
873 u32 mask;
874
875 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
876 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
877 }
878
879 static inline void
880 ixv_rearm_queues(struct adapter *adapter, u64 queues)
881 {
882 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
883 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
884 }
885
886
/*
 * Deferred per-queue work, scheduled via que->que_si (see ixv_msix_que):
 * completes rx/tx processing outside hard-interrupt context and either
 * reschedules itself or re-enables the queue interrupt.
 */
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		/* Restart multiqueue transmit if packets are waiting. */
		if (pcq_peek(txr->txr_interq) != NULL)
			ixgbe_mq_start_locked(ifp, txr);
#endif
		/* Only for queue 0 */
		if ((&adapter->queues[0] == que)
		    && (!IFQ_IS_EMPTY(&ifp->if_snd)))
			ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* Still rx work pending: run again, keep irq masked. */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
922
923 /*********************************************************************
924 *
925 * MSI Queue Interrupt Service routine
926 *
927 **********************************************************************/
928 int
929 ixv_msix_que(void *arg)
930 {
931 struct ix_queue *que = arg;
932 struct adapter *adapter = que->adapter;
933 #ifdef IXGBE_LEGACY_TX
934 struct ifnet *ifp = adapter->ifp;
935 #endif
936 struct tx_ring *txr = que->txr;
937 struct rx_ring *rxr = que->rxr;
938 bool more;
939 u32 newitr = 0;
940
941 ixv_disable_queue(adapter, que->msix);
942 ++que->irqs.ev_count;
943
944 #ifdef __NetBSD__
945 /* Don't run ixgbe_rxeof in interrupt context */
946 more = true;
947 #else
948 more = ixgbe_rxeof(que);
949 #endif
950
951 IXGBE_TX_LOCK(txr);
952 ixgbe_txeof(txr);
953 IXGBE_TX_UNLOCK(txr);
954
955 /* Do AIM now? */
956
957 if (adapter->enable_aim == false)
958 goto no_calc;
959 /*
960 ** Do Adaptive Interrupt Moderation:
961 ** - Write out last calculated setting
962 ** - Calculate based on average size over
963 ** the last interval.
964 */
965 if (que->eitr_setting)
966 IXGBE_WRITE_REG(&adapter->hw,
967 IXGBE_VTEITR(que->msix),
968 que->eitr_setting);
969
970 que->eitr_setting = 0;
971
972 /* Idle, do nothing */
973 if ((txr->bytes == 0) && (rxr->bytes == 0))
974 goto no_calc;
975
976 if ((txr->bytes) && (txr->packets))
977 newitr = txr->bytes/txr->packets;
978 if ((rxr->bytes) && (rxr->packets))
979 newitr = max(newitr,
980 (rxr->bytes / rxr->packets));
981 newitr += 24; /* account for hardware frame, crc */
982
983 /* set an upper boundary */
984 newitr = min(newitr, 3000);
985
986 /* Be nice to the mid range */
987 if ((newitr > 300) && (newitr < 1200))
988 newitr = (newitr / 3);
989 else
990 newitr = (newitr / 2);
991
992 newitr |= newitr << 16;
993
994 /* save for next interrupt */
995 que->eitr_setting = newitr;
996
997 /* Reset state */
998 txr->bytes = 0;
999 txr->packets = 0;
1000 rxr->bytes = 0;
1001 rxr->packets = 0;
1002
1003 no_calc:
1004 if (more)
1005 softint_schedule(que->que_si);
1006 else /* Reenable this interrupt */
1007 ixv_enable_queue(adapter, que->msix);
1008 return 1;
1009 }
1010
1011 static int
1012 ixv_msix_mbx(void *arg)
1013 {
1014 struct adapter *adapter = arg;
1015 struct ixgbe_hw *hw = &adapter->hw;
1016 u32 reg;
1017
1018 ++adapter->link_irq.ev_count;
1019
1020 /* First get the cause */
1021 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1022 /* Clear interrupt with write */
1023 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1024
1025 /* Link status change */
1026 if (reg & IXGBE_EICR_LSC)
1027 softint_schedule(adapter->link_si);
1028
1029 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1030 return 1;
1031 }
1032
1033 /*********************************************************************
1034 *
1035 * Media Ioctl callback
1036 *
1037 * This routine is called whenever the user queries the status of
1038 * the interface using ifconfig.
1039 *
1040 **********************************************************************/
1041 static void
1042 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1043 {
1044 struct adapter *adapter = ifp->if_softc;
1045
1046 INIT_DEBUGOUT("ixv_media_status: begin");
1047 IXGBE_CORE_LOCK(adapter);
1048 ixv_update_link_status(adapter);
1049
1050 ifmr->ifm_status = IFM_AVALID;
1051 ifmr->ifm_active = IFM_ETHER;
1052
1053 if (!adapter->link_active) {
1054 ifmr->ifm_active |= IFM_NONE;
1055 IXGBE_CORE_UNLOCK(adapter);
1056 return;
1057 }
1058
1059 ifmr->ifm_status |= IFM_ACTIVE;
1060
1061 switch (adapter->link_speed) {
1062 case IXGBE_LINK_SPEED_10GB_FULL:
1063 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1064 break;
1065 case IXGBE_LINK_SPEED_1GB_FULL:
1066 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1067 break;
1068 case IXGBE_LINK_SPEED_100_FULL:
1069 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1070 break;
1071 }
1072
1073 IXGBE_CORE_UNLOCK(adapter);
1074
1075 return;
1076 }
1077
1078 /*********************************************************************
1079 *
1080 * Media Ioctl callback
1081 *
1082 * This routine is called when the user changes speed/duplex using
1083 * media/mediopt option with ifconfig.
1084 *
1085 **********************************************************************/
1086 static int
1087 ixv_media_change(struct ifnet * ifp)
1088 {
1089 struct adapter *adapter = ifp->if_softc;
1090 struct ifmedia *ifm = &adapter->media;
1091
1092 INIT_DEBUGOUT("ixv_media_change: begin");
1093
1094 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1095 return (EINVAL);
1096
1097 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1098 case IFM_AUTO:
1099 break;
1100 default:
1101 device_printf(adapter->dev, "Only auto media type\n");
1102 return (EINVAL);
1103 }
1104
1105 return (0);
1106 }
1107
1108
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *  It flattens the ethercom multicast list into a contiguous array
 *  and hands it to the shared code via the ixv_mc_array_itr iterator.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	/* One flat buffer: mcnt entries of IXGBE_ETH_LENGTH_OF_ADDRESS bytes */
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	int	mcnt = 0;
	struct ethercom *ec = &adapter->osdep.ec;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/* Walk the attached multicast addresses; only enm_addrlo is
	 * copied — address ranges (addrlo != addrhi) are not expanded. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		/* Stop before overflowing mta[]; extra addresses are dropped */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}

	update_ptr = mta;

	/* Push the list to the PF through the mailbox */
	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}
1148
1149 /*
1150 * This is an iterator function now needed by the multicast
1151 * shared code. It simply feeds the shared code routine the
1152 * addresses in the array of ixv_set_multi() one by one.
1153 */
1154 static u8 *
1155 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1156 {
1157 u8 *addr = *update_ptr;
1158 u8 *newptr;
1159 *vmdq = 0;
1160
1161 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1162 *update_ptr = newptr;
1163 return addr;
1164 }
1165
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status,updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

/* Callout entry point: take the core lock and do the real work. */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1183
/*
 * Periodic (1 Hz) housekeeping, called with the core lock held:
 * refreshes link state and statistics, scans TX queues for hangs,
 * and either re-arms the callout or — if every queue is hung —
 * resets the interface.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		/*
		 * NOTE(review): the hung test reads que->busy but the
		 * marker below writes que->txr->busy — confirm these are
		 * intentionally different counters and not a typo.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, mark interface down, and re-init */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
}
1249
/*
** Note: this routine updates the OS on the link state
**	the real check of the hardware only happens with
**	a link interrupt.
**
** Propagates adapter->link_up/link_speed (set elsewhere) to the
** ifnet layer, logging transitions when bootverbose is set.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up){
		/* Only act on a down -> up transition */
		if (adapter->link_active == FALSE) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev,"Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
}
1297
1298
/*
 * if_stop entry point: stop the adapter under the core lock.
 * The 'disable' argument is ignored; ixv_stop() always performs a
 * full stop.
 */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1308
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  Must be called with the core lock held.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Reset, then let ixgbe_stop_adapter() run its full quiesce */
	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1342
1343
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 *  Reads PCI config space to fill in the shared-code hw ID fields
 *  and derives the MAC type from them.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	pcitag_t tag;
	pci_chipset_tag_t pc;
	pcireg_t subid, id;
	struct ixgbe_hw *hw = &adapter->hw;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	ixgbe_pci_enable_busmaster(pc, tag);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);

	/* Save off the information about this board */
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* We need this to determine device-specific things */
	ixgbe_set_mac_type(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	return;
}
1384
1385 /*********************************************************************
1386 *
1387 * Setup MSIX Interrupt resources and handlers
1388 *
1389 **********************************************************************/
1390 static int
1391 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1392 {
1393 device_t dev = adapter->dev;
1394 struct ix_queue *que = adapter->queues;
1395 struct tx_ring *txr = adapter->tx_rings;
1396 int error, rid, vector = 0;
1397 pci_chipset_tag_t pc;
1398 pcitag_t tag;
1399 char intrbuf[PCI_INTRSTR_LEN];
1400 char intr_xname[32];
1401 const char *intrstr = NULL;
1402 kcpuset_t *affinity;
1403 int cpu_id = 0;
1404
1405 pc = adapter->osdep.pc;
1406 tag = adapter->osdep.tag;
1407
1408 adapter->osdep.nintrs = adapter->num_queues + 1;
1409 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
1410 adapter->osdep.nintrs) != 0) {
1411 aprint_error_dev(dev,
1412 "failed to allocate MSI-X interrupt\n");
1413 return (ENXIO);
1414 }
1415
1416 kcpuset_create(&affinity, false);
1417 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1418 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
1419 device_xname(dev), i);
1420 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1421 sizeof(intrbuf));
1422 #ifdef IXGBE_MPSAFE
1423 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1424 true);
1425 #endif
1426 /* Set the handler function */
1427 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
1428 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
1429 intr_xname);
1430 if (que->res == NULL) {
1431 pci_intr_release(pc, adapter->osdep.intrs,
1432 adapter->osdep.nintrs);
1433 aprint_error_dev(dev,
1434 "Failed to register QUE handler\n");
1435 kcpuset_destroy(affinity);
1436 return (ENXIO);
1437 }
1438 que->msix = vector;
1439 adapter->active_queues |= (u64)(1 << que->msix);
1440
1441 cpu_id = i;
1442 /* Round-robin affinity */
1443 kcpuset_zero(affinity);
1444 kcpuset_set(affinity, cpu_id % ncpu);
1445 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1446 NULL);
1447 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1448 intrstr);
1449 if (error == 0)
1450 aprint_normal(", bound queue %d to cpu %d\n",
1451 i, cpu_id % ncpu);
1452 else
1453 aprint_normal("\n");
1454
1455 #ifndef IXGBE_LEGACY_TX
1456 txr->txr_si
1457 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1458 ixgbe_deferred_mq_start, txr);
1459 #endif
1460 que->que_si
1461 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1462 ixv_handle_que, que);
1463 if (que->que_si == NULL) {
1464 aprint_error_dev(dev,
1465 "could not establish software interrupt\n");
1466 }
1467 }
1468
1469 /* and Mailbox */
1470 cpu_id++;
1471 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
1472 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1473 sizeof(intrbuf));
1474 #ifdef IXGBE_MPSAFE
1475 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
1476 true);
1477 #endif
1478 /* Set the mbx handler function */
1479 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
1480 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
1481 intr_xname);
1482 if (adapter->osdep.ihs[vector] == NULL) {
1483 adapter->res = NULL;
1484 aprint_error_dev(dev, "Failed to register LINK handler\n");
1485 kcpuset_destroy(affinity);
1486 return (ENXIO);
1487 }
1488 /* Round-robin affinity */
1489 kcpuset_zero(affinity);
1490 kcpuset_set(affinity, cpu_id % ncpu);
1491 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1492
1493 aprint_normal_dev(dev,
1494 "for link, interrupting at %s", intrstr);
1495 if (error == 0)
1496 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
1497 else
1498 aprint_normal("\n");
1499
1500 adapter->vector = vector;
1501 /* Tasklets for Mailbox */
1502 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1503 ixv_handle_mbx, adapter);
1504 /*
1505 ** Due to a broken design QEMU will fail to properly
1506 ** enable the guest for MSIX unless the vectors in
1507 ** the table are all set up, so we must rewrite the
1508 ** ENABLE in the MSIX control register again at this
1509 ** point to cause it to successfully initialize us.
1510 */
1511 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1512 int msix_ctrl;
1513 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1514 rid += PCI_MSIX_CTL;
1515 msix_ctrl = pci_conf_read(pc, tag, rid);
1516 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1517 pci_conf_write(pc, tag, rid, msix_ctrl);
1518 }
1519
1520 kcpuset_destroy(affinity);
1521 return (0);
1522 }
1523
1524 /*
1525 * Setup MSIX resources, note that the VF
1526 * device MUST use MSIX, there is no fallback.
1527 */
1528 static int
1529 ixv_setup_msix(struct adapter *adapter)
1530 {
1531 device_t dev = adapter->dev;
1532 int want, queues, msgs;
1533
1534 /* Must have at least 2 MSIX vectors */
1535 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1536 if (msgs < 2) {
1537 aprint_error_dev(dev,"MSIX config error\n");
1538 return (ENXIO);
1539 }
1540 msgs = MIN(msgs, IXG_MAX_NINTR);
1541
1542 /* Figure out a reasonable auto config value */
1543 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
1544
1545 if (ixv_num_queues != 0)
1546 queues = ixv_num_queues;
1547 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
1548 queues = IXGBE_VF_MAX_TX_QUEUES;
1549
1550 /*
1551 ** Want vectors for the queues,
1552 ** plus an additional for mailbox.
1553 */
1554 want = queues + 1;
1555 if (msgs >= want)
1556 msgs = want;
1557 else {
1558 aprint_error_dev(dev,
1559 "MSIX Configuration Problem, "
1560 "%d vectors but %d queues wanted!\n",
1561 msgs, want);
1562 return -1;
1563 }
1564
1565 adapter->msix_mem = (void *)1; /* XXX */
1566 aprint_normal_dev(dev,
1567 "Using MSIX interrupts with %d vectors\n", msgs);
1568 adapter->num_queues = queues;
1569 return (msgs);
1570 }
1571
1572
/*
 * Map BAR0 (the register window) and configure MSI-X.  Prefetchable
 * mappings are explicitly disabled for the register space.  Returns
 * 0 on success, ENXIO on failure.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype;
	device_t        dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}
	adapter->hw.back = adapter;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	/* Note: only ENXIO is treated as a failure return here */
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1623
/*
 * Tear down everything ixv_allocate_msix()/ixv_allocate_pci_resources()
 * set up: per-queue interrupt handlers, the link handler, the MSI-X
 * vectors themselves, and the BAR0 mapping.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	int		rid;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Link interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size is nonzero only when BAR0 was successfully mapped */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
}
1660
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Fills in the ifnet callbacks and capabilities, attaches the
 *  ethernet layer, and registers the (auto-only) media types.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_START_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
#ifndef IXGBE_LEGACY_TX
	ifp->if_transmit = ixgbe_mq_start;
#endif
	ifp->if_start = ixgbe_start;
	/* Leave a little slack in the send queue below the ring size */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	if_initialize(ifp);
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise checksum offload/TSO; nothing enabled by default */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    		    |  ETHERCAP_VLAN_MTU;
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1746
/*
 * Query current link state through the shared-code check_link hook
 * (if present), updating adapter->link_speed and adapter->link_up.
 */
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.ops.check_link)
		hw->mac.ops.check_link(hw, &adapter->link_speed,
		    &adapter->link_up, FALSE);
}
1756
1757
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs base/length/head/tail and control registers for every
 *  TX ring, then enables each ring.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering of descriptor writeback */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1805
1806
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 *  Tells the PF our max frame size, then for each RX ring: disables
 *  it, programs base/length/head/tail and SRRCTL, and re-enables it,
 *  polling up to 10ms for the enable bit to settle each way.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	/* Larger RX buffers for jumbo MTU */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to 10 x 1ms) for the disable to take effect */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to 10 x 1ms) for the enable to take effect */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1929
/*
 * Re-apply VLAN state after a (re)init: enable VLAN tag stripping on
 * each RX queue and replay the shadow VFTA to the PF via the mailbox.
 * No-op when no vlans are attached.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct rx_ring	*rxr;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Mailbox can be busy; retry up to 5 times */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1985
#if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD */
/*
** This routine is run via an vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
/* (Disabled) Set the shadow-VFTA bit for vtag and re-init to apply. */
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* index = word in the shadow VFTA, bit = position within it */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via an vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
/* (Disabled) Clear the shadow-VFTA bit for vtag and re-init to apply. */
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
#endif
2041
/*
 * Enable all VF interrupt sources: set the interrupt mask, program
 * auto-clear for the queue causes (but not mailbox/link), and unmask
 * every queue vector.
 */
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Queue causes auto-clear; mailbox/link must be acked manually */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
2063
/*
 * Mask all VF interrupts: clear auto-clear, set the full interrupt
 * mask-clear register, and flush the posted writes.
 */
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}
2072
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC (0 = RX, 1 = TX, -1 = MISC/mailbox)
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Hardware requires the valid bit set alongside the vector */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/* Each 32-bit IVAR holds 4 byte-wide entries: two
		 * (RX,TX) pairs; select the byte for this entry/type */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
2100
/*
 * Route every queue's RX and TX causes to its MSI-X vector, seed the
 * per-vector interrupt throttle (EITR), and map the mailbox cause to
 * the link vector.
 */
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct  ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}
2119
2120
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
**
** Re-checks link through the mailbox and pushes the result to the
** network stack.
*/
static void
ixv_handle_mbx(void *context)
{
	struct adapter  *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
2134
2135 /*
2136 ** The VF stats registers never have a truly virgin
2137 ** starting point, so this routine tries to make an
2138 ** artificial one, marking ground zero on attach as
2139 ** it were.
2140 */
2141 static void
2142 ixv_save_stats(struct adapter *adapter)
2143 {
2144 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2145
2146 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2147 stats->saved_reset_vfgprc +=
2148 stats->vfgprc.ev_count - stats->base_vfgprc;
2149 stats->saved_reset_vfgptc +=
2150 stats->vfgptc.ev_count - stats->base_vfgptc;
2151 stats->saved_reset_vfgorc +=
2152 stats->vfgorc.ev_count - stats->base_vfgorc;
2153 stats->saved_reset_vfgotc +=
2154 stats->vfgotc.ev_count - stats->base_vfgotc;
2155 stats->saved_reset_vfmprc +=
2156 stats->vfmprc.ev_count - stats->base_vfmprc;
2157 }
2158 }
2159
2160 static void
2161 ixv_init_stats(struct adapter *adapter)
2162 {
2163 struct ixgbe_hw *hw = &adapter->hw;
2164
2165 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2166 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2167 adapter->stats.vf.last_vfgorc |=
2168 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2169
2170 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2171 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2172 adapter->stats.vf.last_vfgotc |=
2173 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2174
2175 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2176
2177 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2178 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2179 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2180 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2181 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2182 }
2183
/*
** Fold a 32-bit hardware counter (read via IXGBE_READ_REG, using a
** "hw" pointer from the caller's scope) into a 64-bit evcnt.  If the
** hardware value went backwards the counter wrapped, so carry 2^32
** into the accumulated count, then splice the fresh low 32 bits in.
**
** Wrapped in do { } while (0) so an invocation behaves as a single
** statement in every context (e.g. an unbraced if/else), and the
** arguments are parenthesized against operator-precedence surprises.
*/
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2193
/*
** Fold a 36-bit hardware counter, split across an LSB and an MSB
** register, into a 64-bit evcnt (reads use the caller-scope "hw").
** A backwards move of the combined value means the 36-bit counter
** wrapped, so carry 2^36 into the accumulated count before splicing
** the fresh low 36 bits in.
**
** do { } while (0) wrapping and parenthesized arguments for macro
** hygiene, mirroring UPDATE_STAT_32.
*/
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2205
2206 /*
2207 ** ixv_update_stats - Update the board statistics counters.
2208 */
2209 void
2210 ixv_update_stats(struct adapter *adapter)
2211 {
2212 struct ixgbe_hw *hw = &adapter->hw;
2213
2214 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2215 adapter->stats.vf.vfgprc);
2216 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2217 adapter->stats.vf.vfgptc);
2218 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2219 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2220 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2221 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2222 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2223 adapter->stats.vf.vfmprc);
2224 }
2225
2226 /**********************************************************************
2227 *
2228 * This routine is called only when em_display_debug_stats is enabled.
2229 * This routine provides a way to take a look at important statistics
2230 * maintained by the driver and hardware.
2231 *
2232 **********************************************************************/
2233 static void
2234 ixv_print_debug_info(struct adapter *adapter)
2235 {
2236 device_t dev = adapter->dev;
2237 struct ixgbe_hw *hw = &adapter->hw;
2238 struct ix_queue *que = adapter->queues;
2239 struct rx_ring *rxr;
2240 struct tx_ring *txr;
2241 #ifdef LRO
2242 struct lro_ctrl *lro;
2243 #endif /* LRO */
2244
2245 device_printf(dev,"Error Byte Count = %u \n",
2246 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2247
2248 for (int i = 0; i < adapter->num_queues; i++, que++) {
2249 txr = que->txr;
2250 rxr = que->rxr;
2251 #ifdef LRO
2252 lro = &rxr->lro;
2253 #endif /* LRO */
2254 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2255 que->msix, (long)que->irqs.ev_count);
2256 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2257 rxr->me, (long long)rxr->rx_packets.ev_count);
2258 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2259 rxr->me, (long)rxr->rx_bytes.ev_count);
2260 #ifdef LRO
2261 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
2262 rxr->me, (long long)lro->lro_queued);
2263 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
2264 rxr->me, (long long)lro->lro_flushed);
2265 #endif /* LRO */
2266 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2267 txr->me, (long)txr->total_packets.ev_count);
2268 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2269 txr->me, (long)txr->no_desc_avail.ev_count);
2270 }
2271
2272 device_printf(dev,"MBX IRQ Handled: %lu\n",
2273 (long)adapter->link_irq.ev_count);
2274 return;
2275 }
2276
2277 static int
2278 ixv_sysctl_debug(SYSCTLFN_ARGS)
2279 {
2280 struct sysctlnode node;
2281 int error, result;
2282 struct adapter *adapter;
2283
2284 node = *rnode;
2285 adapter = (struct adapter *)node.sysctl_data;
2286 node.sysctl_data = &result;
2287 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2288
2289 if (error)
2290 return error;
2291
2292 if (result == 1)
2293 ixv_print_debug_info(adapter);
2294
2295 return 0;
2296 }
2297
2298 const struct sysctlnode *
2299 ixv_sysctl_instance(struct adapter *adapter)
2300 {
2301 const char *dvname;
2302 struct sysctllog **log;
2303 int rc;
2304 const struct sysctlnode *rnode;
2305
2306 log = &adapter->sysctllog;
2307 dvname = device_xname(adapter->dev);
2308
2309 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2310 0, CTLTYPE_NODE, dvname,
2311 SYSCTL_DESCR("ixv information and settings"),
2312 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2313 goto err;
2314
2315 return rnode;
2316 err:
2317 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2318 return NULL;
2319 }
2320
2321 static void
2322 ixv_add_device_sysctls(struct adapter *adapter)
2323 {
2324 struct sysctllog **log;
2325 const struct sysctlnode *rnode, *cnode;
2326 device_t dev;
2327
2328 dev = adapter->dev;
2329 log = &adapter->sysctllog;
2330
2331 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2332 aprint_error_dev(dev, "could not create sysctl root\n");
2333 return;
2334 }
2335
2336 if (sysctl_createv(log, 0, &rnode, &cnode,
2337 CTLFLAG_READWRITE, CTLTYPE_INT,
2338 "debug", SYSCTL_DESCR("Debug Info"),
2339 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2340 aprint_error_dev(dev, "could not create sysctl\n");
2341
2342 if (sysctl_createv(log, 0, &rnode, &cnode,
2343 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2344 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2345 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2346 aprint_error_dev(dev, "could not create sysctl\n");
2347 }
2348
/*
 * Add statistic sysctls for the VF.
 *
 * Attaches evcnt(9) counters for driver-level events, per-queue
 * TX/RX counters, and the VF MAC statistics, and creates one
 * sysctl node per queue.  Counters are visible via vmstat -e.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	/* txr/rxr start at queue 0 and advance with the loop index */
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	const char *xname = device_xname(dev);

	/* Driver Statistics (software-side event counters) */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSIX IRQ Handled");

	/* Per-queue statistics and sysctl nodes */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* rnode is re-targeted to the new per-queue node here */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
			NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		/* TX-side per-queue counters */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		/* RX-side per-queue counters */
		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/* NOTE(review): FreeBSD sysctl API (ctx/queue_list are not
		 * defined here); dead unless LRO support is ported. */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats (hardware counters, see ixv_update_stats) */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");
}
2522
2523 static void
2524 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2525 const char *description, int *limit, int value)
2526 {
2527 device_t dev = adapter->dev;
2528 struct sysctllog **log;
2529 const struct sysctlnode *rnode, *cnode;
2530
2531 log = &adapter->sysctllog;
2532 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2533 aprint_error_dev(dev, "could not create sysctl root\n");
2534 return;
2535 }
2536 if (sysctl_createv(log, 0, &rnode, &cnode,
2537 CTLFLAG_READWRITE, CTLTYPE_INT,
2538 name, SYSCTL_DESCR(description),
2539 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2540 aprint_error_dev(dev, "could not create sysctl\n");
2541 *limit = value;
2542 }
2543