/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
34 /*$NetBSD: ixv.c,v 1.54 2017/02/16 08:01:11 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
42 /*********************************************************************
43 * Driver version
44 *********************************************************************/
45 char ixv_driver_version[] = "1.4.6-k";
46
47 /*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
/*
 * PCI IDs matched by ixv_probe(); the trailing index field selects the
 * branding string from ixv_strings[].  Subvendor/subdevice of 0 acts as
 * a wildcard in ixv_lookup().
 */
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
66
67 /*********************************************************************
68 * Table of branding strings
69 *********************************************************************/
70
/* Branding strings, indexed by the last field of ixv_vendor_info_array */
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
74
75 /*********************************************************************
76 * Function prototypes
77 *********************************************************************/
78 static int ixv_probe(device_t, cfdata_t, void *);
79 static void ixv_attach(device_t, device_t, void *);
80 static int ixv_detach(device_t, int);
81 #if 0
82 static int ixv_shutdown(device_t);
83 #endif
84 static int ixv_ioctl(struct ifnet *, u_long, void *);
85 static int ixv_init(struct ifnet *);
86 static void ixv_init_locked(struct adapter *);
87 static void ixv_stop(void *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
90 static void ixv_identify_hardware(struct adapter *);
91 static int ixv_allocate_pci_resources(struct adapter *,
92 const struct pci_attach_args *);
93 static int ixv_allocate_msix(struct adapter *,
94 const struct pci_attach_args *);
95 static int ixv_setup_msix(struct adapter *);
96 static void ixv_free_pci_resources(struct adapter *);
97 static void ixv_local_timer(void *);
98 static void ixv_local_timer_locked(void *);
99 static void ixv_setup_interface(device_t, struct adapter *);
100 static void ixv_config_link(struct adapter *);
101
102 static void ixv_initialize_transmit_units(struct adapter *);
103 static void ixv_initialize_receive_units(struct adapter *);
104
105 static void ixv_enable_intr(struct adapter *);
106 static void ixv_disable_intr(struct adapter *);
107 static void ixv_set_multi(struct adapter *);
108 static void ixv_update_link_status(struct adapter *);
109 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
110 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
111 static void ixv_configure_ivars(struct adapter *);
112 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
113
114 static void ixv_setup_vlan_support(struct adapter *);
115 #if 0
116 static void ixv_register_vlan(void *, struct ifnet *, u16);
117 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
118 #endif
119
120 static void ixv_add_device_sysctls(struct adapter *);
121 static void ixv_save_stats(struct adapter *);
122 static void ixv_init_stats(struct adapter *);
123 static void ixv_update_stats(struct adapter *);
124 static void ixv_add_stats_sysctls(struct adapter *);
125 static void ixv_set_sysctl_value(struct adapter *, const char *,
126 const char *, int *, int);
127
128 /* The MSI/X Interrupt handlers */
129 static int ixv_msix_que(void *);
130 static int ixv_msix_mbx(void *);
131
132 /* Deferred interrupt tasklets */
133 static void ixv_handle_que(void *);
134 static void ixv_handle_mbx(void *);
135
136 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
137 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
138
139 #ifdef DEV_NETMAP
140 /*
141 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
142 * if_ix.c.
143 */
144 extern void ixgbe_netmap_attach(struct adapter *adapter);
145
146 #include <net/netmap.h>
147 #include <sys/selinfo.h>
148 #include <dev/netmap/netmap_kern.h>
149 #endif /* DEV_NETMAP */
150
151 /*********************************************************************
152 * FreeBSD Device Interface Entry Points
153 *********************************************************************/
154
/*
 * NetBSD autoconf(9) attachment glue: registers the match/attach/detach
 * entry points and allocates sizeof(struct adapter) as the softc.
 * DVF_DETACH_SHUTDOWN allows detach at system shutdown.
 */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
158
159 # if 0
160 static device_method_t ixv_methods[] = {
161 /* Device interface */
162 DEVMETHOD(device_probe, ixv_probe),
163 DEVMETHOD(device_attach, ixv_attach),
164 DEVMETHOD(device_detach, ixv_detach),
165 DEVMETHOD(device_shutdown, ixv_shutdown),
166 DEVMETHOD_END
167 };
168 #endif
169
170 #if 0
171 static driver_t ixv_driver = {
172 "ixv", ixv_methods, sizeof(struct adapter),
173 };
174
175 devclass_t ixv_devclass;
176 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
177 MODULE_DEPEND(ixv, pci, 1, 1, 1);
178 MODULE_DEPEND(ixv, ether, 1, 1, 1);
179 #ifdef DEV_NETMAP
180 MODULE_DEPEND(ix, netmap, 1, 1, 1);
181 #endif /* DEV_NETMAP */
182 /* XXX depend on 'ix' ? */
183 #endif
184
/*
** TUNEABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 0;
/*
 * NetBSD has no FreeBSD-style loader tunables; stub the macro out so
 * the FreeBSD-derived TUNABLE_INT() registrations below compile to
 * nothing while keeping the diff against upstream small.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
229
230 /*********************************************************************
231 * Device identification routine
232 *
233 * ixv_probe determines if the driver should be loaded on
234 * adapter based on PCI vendor/device id of the adapter.
235 *
236 * return 1 on success, 0 on failure
237 *********************************************************************/
238
239 static int
240 ixv_probe(device_t dev, cfdata_t cf, void *aux)
241 {
242 #ifdef __HAVE_PCI_MSI_MSIX
243 const struct pci_attach_args *pa = aux;
244
245 return (ixv_lookup(pa) != NULL) ? 1 : 0;
246 #else
247 return 0;
248 #endif
249 }
250
251 static ixgbe_vendor_info_t *
252 ixv_lookup(const struct pci_attach_args *pa)
253 {
254 pcireg_t subid;
255 ixgbe_vendor_info_t *ent;
256
257 INIT_DEBUGOUT("ixv_lookup: begin");
258
259 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
260 return NULL;
261
262 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
263
264 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
265 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
266 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
267
268 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
269 (ent->subvendor_id == 0)) &&
270
271 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
272 (ent->subdevice_id == 0))) {
273 return ent;
274 }
275 }
276 return NULL;
277 }
278
279
280 /*********************************************************************
281 * Device initialization routine
282 *
283 * The attach entry point is called when the driver is being loaded.
284 * This routine identifies the type of hardware, allocates all resources
285 * and initializes the hardware.
286 *
287 * return 0 on success, positive on failure
288 *********************************************************************/
289
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * The softc is allocated and zeroed by autoconf(9); link it to
	 * the device and to the shared-code hw structure.
	 */
	adapter = device_private(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	/* Hooks netmap uses to (re)initialize / stop the device */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;
#endif

	/* Record PCI accessors; prefer the 64-bit DMA tag when available */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* Match already succeeded in ixv_probe(), so this cannot fail */
	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Descriptor-count sanity checks: the ring byte size must be a
	 * multiple of DBA_ALIGN and the count within MIN/MAX bounds;
	 * otherwise fall back to the compile-time default.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: its
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset mbox api to 1.0 */
	error = ixgbe_reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
	if (error) {
		error = EIO;
		goto err_late;
	}

	/* Negotiate mailbox API version; 1.1 preferred, failure tolerated */
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		aprint_debug_dev(dev,
		    "MBX API 1.1 negotiation failed! Error %d\n", error);

	error = ixgbe_init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/*
	 * If the PF did not assign a MAC address, fabricate a random
	 * locally-administered unicast one: clear the group/multicast
	 * bit and set the locally-administered bit in the first octet.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_fast64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Add sysctls */
	ixv_add_device_sysctls(adapter);
	ixv_add_stats_sysctls(adapter);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark success so ixv_detach() knows there is state to undo */
	adapter->osdep.attached = true;
	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return;

}
465
466 /*********************************************************************
467 * Device removal routine
468 *
469 * The detach entry point is called when the driver is being removed.
470 * This routine stops the adapter and deallocates all the resources
471 * that were allocated for driver operation.
472 *
473 * return 0 on success, positive on failure
474 *********************************************************************/
475
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if ixv_attach() bailed out early */
	if (adapter->osdep.attached == false)
		return 0;

#if NVLAN > 0
	/* Make sure VLANS are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	/* Quiesce the hardware before tearing anything down */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
#ifndef IXGBE_LEGACY_TX
		softint_disestablish(txr->txr_si);
#endif
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for the watchdog callout to finish before freeing state */
	callout_halt(&adapter->timer, NULL);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered at attach time */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/*
	 * txr was advanced by the softint loop above, so rewind it;
	 * rxr has not been touched yet and still points at ring 0.
	 */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
585
586 /*********************************************************************
587 *
588 * Shutdown entry point
589 *
590 **********************************************************************/
591 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
592 static int
593 ixv_shutdown(device_t dev)
594 {
595 struct adapter *adapter = device_private(dev);
596 IXGBE_CORE_LOCK(adapter);
597 ixv_stop(adapter);
598 IXGBE_CORE_UNLOCK(adapter);
599 return (0);
600 }
601 #endif
602
603 static int
604 ixv_ifflags_cb(struct ethercom *ec)
605 {
606 struct ifnet *ifp = &ec->ec_if;
607 struct adapter *adapter = ifp->if_softc;
608 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
609
610 IXGBE_CORE_LOCK(adapter);
611
612 if (change != 0)
613 adapter->if_flags = ifp->if_flags;
614
615 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
616 rc = ENETRESET;
617
618 IXGBE_CORE_UNLOCK(adapter);
619
620 return rc;
621 }
622
623 /*********************************************************************
624 * Ioctl entry point
625 *
626 * ixv_ioctl is called when the user wants to configure the
627 * interface.
628 *
629 * return 0 on success, positive on failure
630 **********************************************************************/
631
static int
ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must be toggled as a unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only, no side effects */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Let the common Ethernet ioctl code run first; a return
		 * of ENETRESET means the hardware must be reprogrammed.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* interface not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}
705
706 /*********************************************************************
707 * Init entry point
708 *
709 * This routine is used in two ways. It is used by the stack as
710 * init entry point in network interface structure. It is also used
711 * by the driver as a hw/sw initialization routine to get to a
712 * consistent state.
713 *
714 * return 0 on success, positive on failure
715 **********************************************************************/
716 #define IXGBE_MHADD_MFS_SHIFT 16
717
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the adapter and watchdog before reprogramming */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	ixgbe_reset_hw(hw);
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);
	hw->mac.get_link_status = TRUE;

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
822
823 static int
824 ixv_init(struct ifnet *ifp)
825 {
826 struct adapter *adapter = ifp->if_softc;
827
828 IXGBE_CORE_LOCK(adapter);
829 ixv_init_locked(adapter);
830 IXGBE_CORE_UNLOCK(adapter);
831 return 0;
832 }
833
834
835 /*
836 **
837 ** MSIX Interrupt Handlers and Tasklets
838 **
839 */
840
841 static inline void
842 ixv_enable_queue(struct adapter *adapter, u32 vector)
843 {
844 struct ixgbe_hw *hw = &adapter->hw;
845 u32 queue = 1 << vector;
846 u32 mask;
847
848 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
849 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
850 }
851
852 static inline void
853 ixv_disable_queue(struct adapter *adapter, u32 vector)
854 {
855 struct ixgbe_hw *hw = &adapter->hw;
856 u64 queue = (u64)(1 << vector);
857 u32 mask;
858
859 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
860 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
861 }
862
/*
 * Fire a software interrupt (VTEICS) for every queue whose bit is set
 * in "queues".  NOTE(review): the u64 bitmap is masked down to the
 * 32-bit IXGBE_EIMS_RTX_QUEUE range, so bits above 31 are silently
 * dropped — harmless for the VF's small queue count, but confirm if
 * the queue limit ever grows.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
869
870
871 static void
872 ixv_handle_que(void *context)
873 {
874 struct ix_queue *que = context;
875 struct adapter *adapter = que->adapter;
876 struct tx_ring *txr = que->txr;
877 struct ifnet *ifp = adapter->ifp;
878 bool more;
879
880 adapter->handleq.ev_count++;
881
882 if (ifp->if_flags & IFF_RUNNING) {
883 more = ixgbe_rxeof(que);
884 IXGBE_TX_LOCK(txr);
885 ixgbe_txeof(txr);
886 #ifndef IXGBE_LEGACY_TX
887 if (pcq_peek(txr->txr_interq) != NULL)
888 ixgbe_mq_start_locked(ifp, txr);
889 #endif
890 /* Only for queue 0 */
891 if ((&adapter->queues[0] == que)
892 && (!IFQ_IS_EMPTY(&ifp->if_snd)))
893 ixgbe_start_locked(txr, ifp);
894 IXGBE_TX_UNLOCK(txr);
895 if (more) {
896 adapter->req.ev_count++;
897 softint_schedule(que->que_si);
898 return;
899 }
900 }
901
902 /* Reenable this interrupt */
903 ixv_enable_queue(adapter, que->msix);
904 return;
905 }
906
907 /*********************************************************************
908 *
909 * MSI Queue Interrupt Service routine
910 *
911 **********************************************************************/
int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
#ifdef IXGBE_LEGACY_TX
	struct ifnet *ifp = adapter->ifp;
#endif
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until the deferred softint has run */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet since the last interrupt, per direction */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* NOTE(review): interval mirrored into the top 16 bits — confirm
	 * against the VTEITR register layout in the datasheet. */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return 1;
}
994
995 static int
996 ixv_msix_mbx(void *arg)
997 {
998 struct adapter *adapter = arg;
999 struct ixgbe_hw *hw = &adapter->hw;
1000 u32 reg;
1001
1002 ++adapter->link_irq.ev_count;
1003
1004 /* First get the cause */
1005 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1006 /* Clear interrupt with write */
1007 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1008
1009 /* Link status change */
1010 if (reg & IXGBE_EICR_LSC)
1011 softint_schedule(adapter->link_si);
1012
1013 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1014 return 1;
1015 }
1016
1017 /*********************************************************************
1018 *
1019 * Media Ioctl callback
1020 *
1021 * This routine is called whenever the user queries the status of
1022 * the interface using ifconfig.
1023 *
1024 **********************************************************************/
1025 static void
1026 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1027 {
1028 struct adapter *adapter = ifp->if_softc;
1029
1030 INIT_DEBUGOUT("ixv_media_status: begin");
1031 IXGBE_CORE_LOCK(adapter);
1032 ixv_update_link_status(adapter);
1033
1034 ifmr->ifm_status = IFM_AVALID;
1035 ifmr->ifm_active = IFM_ETHER;
1036
1037 if (!adapter->link_active) {
1038 ifmr->ifm_active |= IFM_NONE;
1039 IXGBE_CORE_UNLOCK(adapter);
1040 return;
1041 }
1042
1043 ifmr->ifm_status |= IFM_ACTIVE;
1044
1045 switch (adapter->link_speed) {
1046 case IXGBE_LINK_SPEED_10GB_FULL:
1047 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1048 break;
1049 case IXGBE_LINK_SPEED_1GB_FULL:
1050 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1051 break;
1052 case IXGBE_LINK_SPEED_100_FULL:
1053 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1054 break;
1055 }
1056
1057 IXGBE_CORE_UNLOCK(adapter);
1058
1059 return;
1060 }
1061
1062 /*********************************************************************
1063 *
1064 * Media Ioctl callback
1065 *
1066 * This routine is called when the user changes speed/duplex using
1067 * media/mediopt option with ifconfig.
1068 *
1069 **********************************************************************/
1070 static int
1071 ixv_media_change(struct ifnet * ifp)
1072 {
1073 struct adapter *adapter = ifp->if_softc;
1074 struct ifmedia *ifm = &adapter->media;
1075
1076 INIT_DEBUGOUT("ixv_media_change: begin");
1077
1078 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1079 return (EINVAL);
1080
1081 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1082 case IFM_AUTO:
1083 break;
1084 default:
1085 device_printf(adapter->dev, "Only auto media type\n");
1086 return (EINVAL);
1087 }
1088
1089 return (0);
1090 }
1091
1092
1093 /*********************************************************************
1094 * Multicast Update
1095 *
1096 * This routine is called whenever multicast address list is updated.
1097 *
1098 **********************************************************************/
1099 #define IXGBE_RAR_ENTRIES 16
1100
1101 static void
1102 ixv_set_multi(struct adapter *adapter)
1103 {
1104 struct ether_multi *enm;
1105 struct ether_multistep step;
1106 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1107 u8 *update_ptr;
1108 int mcnt = 0;
1109 struct ethercom *ec = &adapter->osdep.ec;
1110
1111 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1112
1113 ETHER_FIRST_MULTI(step, ec, enm);
1114 while (enm != NULL) {
1115 bcopy(enm->enm_addrlo,
1116 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1117 IXGBE_ETH_LENGTH_OF_ADDRESS);
1118 mcnt++;
1119 /* XXX This might be required --msaitoh */
1120 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1121 break;
1122 ETHER_NEXT_MULTI(step, enm);
1123 }
1124
1125 update_ptr = mta;
1126
1127 ixgbe_update_mc_addr_list(&adapter->hw,
1128 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1129
1130 return;
1131 }
1132
1133 /*
1134 * This is an iterator function now needed by the multicast
1135 * shared code. It simply feeds the shared code routine the
1136 * addresses in the array of ixv_set_multi() one by one.
1137 */
1138 static u8 *
1139 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1140 {
1141 u8 *addr = *update_ptr;
1142 u8 *newptr;
1143 *vmdq = 0;
1144
1145 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1146 *update_ptr = newptr;
1147 return addr;
1148 }
1149
1150 /*********************************************************************
1151 * Timer routine
1152 *
1153 * This routine checks for link status,updates statistics,
1154 * and runs the watchdog check.
1155 *
1156 **********************************************************************/
1157
/*
 * ixv_local_timer - callout entry point for the periodic timer.
 *
 * Takes the core lock and delegates to ixv_local_timer_locked(),
 * which does the real link/stats/watchdog work.
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1167
/*
 * ixv_local_timer_locked - periodic housekeeping (core lock held).
 *
 * Updates link state and statistics, then checks every TX queue for
 * progress.  A queue marked IXGBE_QUEUE_HUNG counts toward the
 * watchdog; only when ALL queues are hung is the interface reset.
 * Otherwise the callout is re-armed for one second later.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			/* Tell txeof to stop counting; queue is hung. */
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
}
1233
1234 /*
1235 ** Note: this routine updates the OS on the link state
1236 ** the real check of the hardware only happens with
1237 ** a link interrupt.
1238 */
1239 static void
1240 ixv_update_link_status(struct adapter *adapter)
1241 {
1242 struct ifnet *ifp = adapter->ifp;
1243 device_t dev = adapter->dev;
1244
1245 if (adapter->link_up){
1246 if (adapter->link_active == FALSE) {
1247 if (bootverbose) {
1248 const char *bpsmsg;
1249
1250 switch (adapter->link_speed) {
1251 case IXGBE_LINK_SPEED_10GB_FULL:
1252 bpsmsg = "10 Gbps";
1253 break;
1254 case IXGBE_LINK_SPEED_1GB_FULL:
1255 bpsmsg = "1 Gbps";
1256 break;
1257 case IXGBE_LINK_SPEED_100_FULL:
1258 bpsmsg = "100 Mbps";
1259 break;
1260 default:
1261 bpsmsg = "unknown speed";
1262 break;
1263 }
1264 device_printf(dev,"Link is up %s %s \n",
1265 bpsmsg, "Full Duplex");
1266 }
1267 adapter->link_active = TRUE;
1268 if_link_state_change(ifp, LINK_STATE_UP);
1269 }
1270 } else { /* Link down */
1271 if (adapter->link_active == TRUE) {
1272 if (bootverbose)
1273 device_printf(dev,"Link is Down\n");
1274 if_link_state_change(ifp, LINK_STATE_DOWN);
1275 adapter->link_active = FALSE;
1276 }
1277 }
1278
1279 return;
1280 }
1281
1282
/*
 * ixv_ifstop - ifnet if_stop callback.
 *
 * Takes the core lock and stops the adapter.  The "disable" argument
 * is ignored; the hardware is always fully stopped.
 */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1292
1293 /*********************************************************************
1294 *
1295 * This routine disables all traffic on the adapter by issuing a
1296 * global reset on the MAC and deallocates TX/RX buffers.
1297 *
1298 **********************************************************************/
1299
/*
 * ixv_stop - quiesce the adapter (core lock held).
 *
 * Disables interrupts, marks the interface down, resets and stops the
 * MAC, cancels the periodic timer, and restores RAR[0].
 */
static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts before touching the hardware further. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	 * NOTE(review): clearing adapter_stopped before
	 * ixgbe_stop_adapter() presumably forces the shared code to
	 * run its full stop sequence rather than short-circuiting —
	 * confirm against the shared-code implementation.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1326
1327
1328 /*********************************************************************
1329 *
1330 * Determine hardware revision.
1331 *
1332 **********************************************************************/
1333 static void
1334 ixv_identify_hardware(struct adapter *adapter)
1335 {
1336 pcitag_t tag;
1337 pci_chipset_tag_t pc;
1338 pcireg_t subid, id;
1339 struct ixgbe_hw *hw = &adapter->hw;
1340
1341 pc = adapter->osdep.pc;
1342 tag = adapter->osdep.tag;
1343
1344 /*
1345 ** Make sure BUSMASTER is set, on a VM under
1346 ** KVM it may not be and will break things.
1347 */
1348 ixgbe_pci_enable_busmaster(pc, tag);
1349
1350 id = pci_conf_read(pc, tag, PCI_ID_REG);
1351 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1352
1353 /* Save off the information about this board */
1354 hw->vendor_id = PCI_VENDOR(id);
1355 hw->device_id = PCI_PRODUCT(id);
1356 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1357 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1358 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1359
1360 /* We need this to determine device-specific things */
1361 ixgbe_set_mac_type(hw);
1362
1363 /* Set the right number of segments */
1364 adapter->num_segs = IXGBE_82599_SCATTER;
1365
1366 return;
1367 }
1368
1369 /*********************************************************************
1370 *
1371 * Setup MSIX Interrupt resources and handlers
1372 *
1373 **********************************************************************/
1374 static int
1375 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1376 {
1377 device_t dev = adapter->dev;
1378 struct ix_queue *que = adapter->queues;
1379 struct tx_ring *txr = adapter->tx_rings;
1380 int error, rid, vector = 0;
1381 pci_chipset_tag_t pc;
1382 pcitag_t tag;
1383 char intrbuf[PCI_INTRSTR_LEN];
1384 char intr_xname[32];
1385 const char *intrstr = NULL;
1386 kcpuset_t *affinity;
1387 int cpu_id = 0;
1388
1389 pc = adapter->osdep.pc;
1390 tag = adapter->osdep.tag;
1391
1392 adapter->osdep.nintrs = adapter->num_queues + 1;
1393 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
1394 adapter->osdep.nintrs) != 0) {
1395 aprint_error_dev(dev,
1396 "failed to allocate MSI-X interrupt\n");
1397 return (ENXIO);
1398 }
1399
1400 kcpuset_create(&affinity, false);
1401 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1402 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
1403 device_xname(dev), i);
1404 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1405 sizeof(intrbuf));
1406 #ifdef IXV_MPSAFE
1407 pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1408 true);
1409 #endif
1410 /* Set the handler function */
1411 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
1412 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
1413 intr_xname);
1414 if (que->res == NULL) {
1415 pci_intr_release(pc, adapter->osdep.intrs,
1416 adapter->osdep.nintrs);
1417 aprint_error_dev(dev,
1418 "Failed to register QUE handler\n");
1419 kcpuset_destroy(affinity);
1420 return (ENXIO);
1421 }
1422 que->msix = vector;
1423 adapter->active_queues |= (u64)(1 << que->msix);
1424
1425 cpu_id = i;
1426 /* Round-robin affinity */
1427 kcpuset_zero(affinity);
1428 kcpuset_set(affinity, cpu_id % ncpu);
1429 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1430 NULL);
1431 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1432 intrstr);
1433 if (error == 0)
1434 aprint_normal(", bound queue %d to cpu %d\n",
1435 i, cpu_id % ncpu);
1436 else
1437 aprint_normal("\n");
1438
1439 #ifndef IXGBE_LEGACY_TX
1440 txr->txr_si = softint_establish(SOFTINT_NET,
1441 ixgbe_deferred_mq_start, txr);
1442 #endif
1443 que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1444 que);
1445 if (que->que_si == NULL) {
1446 aprint_error_dev(dev,
1447 "could not establish software interrupt\n");
1448 }
1449 }
1450
1451 /* and Mailbox */
1452 cpu_id++;
1453 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
1454 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1455 sizeof(intrbuf));
1456 #ifdef IXG_MPSAFE
1457 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
1458 true);
1459 #endif
1460 /* Set the mbx handler function */
1461 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
1462 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
1463 intr_xname);
1464 if (adapter->osdep.ihs[vector] == NULL) {
1465 adapter->res = NULL;
1466 aprint_error_dev(dev, "Failed to register LINK handler\n");
1467 kcpuset_destroy(affinity);
1468 return (ENXIO);
1469 }
1470 /* Round-robin affinity */
1471 kcpuset_zero(affinity);
1472 kcpuset_set(affinity, cpu_id % ncpu);
1473 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1474
1475 aprint_normal_dev(dev,
1476 "for link, interrupting at %s", intrstr);
1477 if (error == 0)
1478 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
1479 else
1480 aprint_normal("\n");
1481
1482 adapter->vector = vector;
1483 /* Tasklets for Mailbox */
1484 adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1485 adapter);
1486 /*
1487 ** Due to a broken design QEMU will fail to properly
1488 ** enable the guest for MSIX unless the vectors in
1489 ** the table are all set up, so we must rewrite the
1490 ** ENABLE in the MSIX control register again at this
1491 ** point to cause it to successfully initialize us.
1492 */
1493 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1494 int msix_ctrl;
1495 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1496 rid += PCI_MSIX_CTL;
1497 msix_ctrl = pci_conf_read(pc, tag, rid);
1498 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1499 pci_conf_write(pc, tag, rid, msix_ctrl);
1500 }
1501
1502 kcpuset_destroy(affinity);
1503 return (0);
1504 }
1505
1506 /*
1507 * Setup MSIX resources, note that the VF
1508 * device MUST use MSIX, there is no fallback.
1509 */
1510 static int
1511 ixv_setup_msix(struct adapter *adapter)
1512 {
1513 device_t dev = adapter->dev;
1514 int want, queues, msgs;
1515
1516 /* Must have at least 2 MSIX vectors */
1517 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1518 if (msgs < 2) {
1519 aprint_error_dev(dev,"MSIX config error\n");
1520 return (ENXIO);
1521 }
1522 msgs = MIN(msgs, IXG_MAX_NINTR);
1523
1524 /* Figure out a reasonable auto config value */
1525 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
1526
1527 if (ixv_num_queues != 0)
1528 queues = ixv_num_queues;
1529 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
1530 queues = IXGBE_VF_MAX_TX_QUEUES;
1531
1532 /*
1533 ** Want vectors for the queues,
1534 ** plus an additional for mailbox.
1535 */
1536 want = queues + 1;
1537 if (msgs >= want)
1538 msgs = want;
1539 else {
1540 aprint_error_dev(dev,
1541 "MSIX Configuration Problem, "
1542 "%d vectors but %d queues wanted!\n",
1543 msgs, want);
1544 return -1;
1545 }
1546
1547 adapter->msix_mem = (void *)1; /* XXX */
1548 aprint_normal_dev(dev,
1549 "Using MSIX interrupts with %d vectors\n", msgs);
1550 adapter->num_queues = queues;
1551 return (msgs);
1552 }
1553
1554
1555 static int
1556 ixv_allocate_pci_resources(struct adapter *adapter,
1557 const struct pci_attach_args *pa)
1558 {
1559 pcireg_t memtype;
1560 device_t dev = adapter->dev;
1561 bus_addr_t addr;
1562 int flags;
1563
1564 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1565 switch (memtype) {
1566 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1567 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1568 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1569 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1570 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1571 goto map_err;
1572 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1573 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1574 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1575 }
1576 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1577 adapter->osdep.mem_size, flags,
1578 &adapter->osdep.mem_bus_space_handle) != 0) {
1579 map_err:
1580 adapter->osdep.mem_size = 0;
1581 aprint_error_dev(dev, "unable to map BAR0\n");
1582 return ENXIO;
1583 }
1584 break;
1585 default:
1586 aprint_error_dev(dev, "unexpected type on BAR0\n");
1587 return ENXIO;
1588 }
1589 adapter->hw.back = adapter;
1590
1591 /* Pick up the tuneable queues */
1592 adapter->num_queues = ixv_num_queues;
1593
1594 /*
1595 ** Now setup MSI/X, should
1596 ** return us the number of
1597 ** configured vectors.
1598 */
1599 adapter->msix = ixv_setup_msix(adapter);
1600 if (adapter->msix == ENXIO)
1601 return (ENXIO);
1602 else
1603 return (0);
1604 }
1605
1606 static void
1607 ixv_free_pci_resources(struct adapter * adapter)
1608 {
1609 struct ix_queue *que = adapter->queues;
1610 int rid;
1611
1612 /*
1613 ** Release all msix queue resources:
1614 */
1615 for (int i = 0; i < adapter->num_queues; i++, que++) {
1616 if (que->res != NULL)
1617 pci_intr_disestablish(adapter->osdep.pc,
1618 adapter->osdep.ihs[i]);
1619 }
1620
1621
1622 /* Clean the Link interrupt last */
1623 rid = adapter->vector;
1624
1625 if (adapter->osdep.ihs[rid] != NULL) {
1626 pci_intr_disestablish(adapter->osdep.pc,
1627 adapter->osdep.ihs[rid]);
1628 adapter->osdep.ihs[rid] = NULL;
1629 }
1630
1631 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1632 adapter->osdep.nintrs);
1633
1634 if (adapter->osdep.mem_size != 0) {
1635 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1636 adapter->osdep.mem_bus_space_handle,
1637 adapter->osdep.mem_size);
1638 }
1639
1640 return;
1641 }
1642
1643 /*********************************************************************
1644 *
1645 * Setup networking device structure and register an interface.
1646 *
1647 **********************************************************************/
/*
 * ixv_setup_interface - initialize and register the ifnet.
 *
 * Fills in the ifnet callbacks, attaches the ethernet layer,
 * advertises capabilities, and registers the (autoselect-only)
 * ifmedia callbacks.  The if_initialize / ether_ifattach /
 * if_register ordering follows the NetBSD interface-attach protocol.
 */
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#ifndef IXGBE_LEGACY_TX
	ifp->if_transmit = ixgbe_mq_start;
#endif
	ifp->if_start = ixgbe_start;
	/* Leave a little slack so the TX ring can never fully fill. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	if_initialize(ifp);
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Offloads are advertised but start disabled (capenable = 0). */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_MTU;
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1725
1726 static void
1727 ixv_config_link(struct adapter *adapter)
1728 {
1729 struct ixgbe_hw *hw = &adapter->hw;
1730
1731 if (hw->mac.ops.check_link)
1732 hw->mac.ops.check_link(hw, &adapter->link_speed,
1733 &adapter->link_up, FALSE);
1734 }
1735
1736
1737 /*********************************************************************
1738 *
1739 * Enable transmit unit.
1740 *
1741 **********************************************************************/
/*
 * ixv_initialize_transmit_units - program the VF TX registers.
 *
 * For each queue: set the writeback threshold, reset head/tail,
 * program the descriptor ring base/length, disable relaxed ordering
 * of descriptor writeback, and finally enable the queue.
 */
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		/* (WTHRESH lives in bits 16.. of TXDCTL, hence << 16.) */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Turn off relaxed ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1784
1785
1786 /*********************************************************************
1787 *
1788 * Setup receive registers and features.
1789 *
1790 **********************************************************************/
1791 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1792
/*
 * ixv_initialize_receive_units - program the VF RX registers.
 *
 * Chooses the receive buffer size from the MTU, programs packet-split
 * types, reports the max frame size to the PF over the mailbox, then
 * for each queue: disables it, programs ring base/length, head/tail
 * and SRRCTL, re-enables it, and finally sets the tail pointer
 * (netmap-aware).  Ends with RXCSUM configuration.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* 4KB clusters for jumbo frames, 2KB otherwise. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1908
/*
 * ixv_setup_vlan_support - re-program VLAN state after a soft reset.
 *
 * Enables VLAN tag stripping on every RX queue and replays the
 * shadow VFTA to the PF over the mailbox (the reset cleared the
 * hardware copy).  Each mailbox request is retried a handful of
 * times since the PF may be momentarily busy.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, vid, vfta, retry;
	struct rx_ring *rxr;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Retry a few times; the PF may be busy. */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1964
1965 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1966 /*
1967 ** This routine is run via an vlan config EVENT,
1968 ** it enables us to use the HW Filter table since
1969 ** we can get the vlan id. This just creates the
1970 ** entry in the soft version of the VFTA, init will
1971 ** repopulate the real table.
1972 */
/*
 * ixv_register_vlan - VLAN-registration event hook (currently
 * compiled out; see the #if 0 above).
 *
 * Records the tag in the shadow VFTA and re-inits so
 * ixv_setup_vlan_support() pushes it to the hardware.
 */
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* VFTA is an array of 32-bit words: word index + bit within it. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1993
1994 /*
1995 ** This routine is run via an vlan
1996 ** unconfig EVENT, remove our entry
1997 ** in the soft vfta.
1998 */
/*
 * ixv_unregister_vlan - VLAN-unregistration event hook (currently
 * compiled out; see the #if 0 above).
 *
 * Clears the tag from the shadow VFTA and re-inits to sync hardware.
 */
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Same word/bit layout as ixv_register_vlan(). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
2019 #endif
2020
/*
 * ixv_enable_intr - unmask all VF interrupt sources.
 *
 * Programs the interrupt mask and auto-clear registers, then enables
 * each queue's vector individually.
 */
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Auto-clear everything except the mailbox/link sources. */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
2042
2043 static void
2044 ixv_disable_intr(struct adapter *adapter)
2045 {
2046 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2047 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
2048 IXGBE_WRITE_FLUSH(&adapter->hw);
2049 return;
2050 }
2051
2052 /*
2053 ** Setup the correct IVAR register for a particular MSIX interrupt
2054 ** - entry is the register array entry
2055 ** - vector is the MSIX vector for this queue
2056 ** - type is RX/TX/MISC
2057 */
2058 static void
2059 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2060 {
2061 struct ixgbe_hw *hw = &adapter->hw;
2062 u32 ivar, index;
2063
2064 vector |= IXGBE_IVAR_ALLOC_VAL;
2065
2066 if (type == -1) { /* MISC IVAR */
2067 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2068 ivar &= ~0xFF;
2069 ivar |= vector;
2070 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2071 } else { /* RX/TX IVARS */
2072 index = (16 * (entry & 1)) + (8 * type);
2073 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2074 ivar &= ~(0xFF << index);
2075 ivar |= (vector << index);
2076 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2077 }
2078 }
2079
2080 static void
2081 ixv_configure_ivars(struct adapter *adapter)
2082 {
2083 struct ix_queue *que = adapter->queues;
2084
2085 for (int i = 0; i < adapter->num_queues; i++, que++) {
2086 /* First the RX queue entry */
2087 ixv_set_ivar(adapter, i, que->msix, 0);
2088 /* ... and the TX */
2089 ixv_set_ivar(adapter, i, que->msix, 1);
2090 /* Set an initial value in EITR */
2091 IXGBE_WRITE_REG(&adapter->hw,
2092 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
2093 }
2094
2095 /* For the mailbox interrupt */
2096 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2097 }
2098
2099
2100 /*
2101 ** Tasklet handler for MSIX MBX interrupts
2102 ** - do outside interrupt since it might sleep
2103 */
/*
 * ixv_handle_mbx - softint handler for the MSI-X mailbox interrupt.
 *
 * Runs outside hard-interrupt context because the mailbox exchange
 * may sleep.  Queries the link via the shared code and pushes the
 * result to the stack.
 */
static void
ixv_handle_mbx(void *context)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
2113
2114 /*
2115 ** The VF stats registers never have a truly virgin
2116 ** starting point, so this routine tries to make an
2117 ** artificial one, marking ground zero on attach as
2118 ** it were.
2119 */
/*
 * ixv_save_stats - preserve counter deltas across a reset.
 *
 * The VF stats registers are never truly reset to zero, so before a
 * reset we fold the (current - base) deltas into the saved_reset_*
 * accumulators; ixv_init_stats() will then establish a new base.
 */
static void
ixv_save_stats(struct adapter *adapter)
{
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Only accumulate if some traffic has actually been counted. */
	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
		stats->saved_reset_vfgprc +=
		    stats->vfgprc.ev_count - stats->base_vfgprc;
		stats->saved_reset_vfgptc +=
		    stats->vfgptc.ev_count - stats->base_vfgptc;
		stats->saved_reset_vfgorc +=
		    stats->vfgorc.ev_count - stats->base_vfgorc;
		stats->saved_reset_vfgotc +=
		    stats->vfgotc.ev_count - stats->base_vfgotc;
		stats->saved_reset_vfmprc +=
		    stats->vfmprc.ev_count - stats->base_vfmprc;
	}
}
2138
2139 static void
2140 ixv_init_stats(struct adapter *adapter)
2141 {
2142 struct ixgbe_hw *hw = &adapter->hw;
2143
2144 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2145 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2146 adapter->stats.vf.last_vfgorc |=
2147 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2148
2149 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2150 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2151 adapter->stats.vf.last_vfgotc |=
2152 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2153
2154 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2155
2156 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2157 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2158 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2159 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2160 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2161 }
2162
/*
 * Fold a 32-bit VF stat register into a 64-bit evcnt.  The hardware
 * counter wraps at 2^32: when the fresh reading is below the previous
 * one a single wrap is assumed and 2^32 is credited to the total.
 * 'last' and 'count' are evaluated more than once; pass plain lvalues.
 * Wrapped in do/while(0) so it behaves as one statement under an
 * unbraced if/else.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2172
/*
 * Fold a 36-bit VF stat counter (split across LSB/MSB registers)
 * into a 64-bit evcnt.  Wraps at 2^36, hence the 0x1000000000LL
 * wrap credit and the ~(2^36 - 1) preservation mask.
 * 'last' and 'count' are evaluated more than once; pass plain lvalues.
 * Wrapped in do/while(0) so it behaves as one statement under an
 * unbraced if/else.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = (cur_msb << 32) | cur_lsb;			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2184
2185 /*
2186 ** ixv_update_stats - Update the board statistics counters.
2187 */
2188 void
2189 ixv_update_stats(struct adapter *adapter)
2190 {
2191 struct ixgbe_hw *hw = &adapter->hw;
2192
2193 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2194 adapter->stats.vf.vfgprc);
2195 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2196 adapter->stats.vf.vfgptc);
2197 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2198 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2199 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2200 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2201 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2202 adapter->stats.vf.vfmprc);
2203 }
2204
2205 /**********************************************************************
2206 *
2207 * This routine is called only when em_display_debug_stats is enabled.
2208 * This routine provides a way to take a look at important statistics
2209 * maintained by the driver and hardware.
2210 *
2211 **********************************************************************/
2212 static void
2213 ixv_print_debug_info(struct adapter *adapter)
2214 {
2215 device_t dev = adapter->dev;
2216 struct ixgbe_hw *hw = &adapter->hw;
2217 struct ix_queue *que = adapter->queues;
2218 struct rx_ring *rxr;
2219 struct tx_ring *txr;
2220 #ifdef LRO
2221 struct lro_ctrl *lro;
2222 #endif /* LRO */
2223
2224 device_printf(dev,"Error Byte Count = %u \n",
2225 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2226
2227 for (int i = 0; i < adapter->num_queues; i++, que++) {
2228 txr = que->txr;
2229 rxr = que->rxr;
2230 #ifdef LRO
2231 lro = &rxr->lro;
2232 #endif /* LRO */
2233 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2234 que->msix, (long)que->irqs.ev_count);
2235 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2236 rxr->me, (long long)rxr->rx_packets.ev_count);
2237 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2238 rxr->me, (long)rxr->rx_bytes.ev_count);
2239 #ifdef LRO
2240 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
2241 rxr->me, (long long)lro->lro_queued);
2242 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
2243 rxr->me, (long long)lro->lro_flushed);
2244 #endif /* LRO */
2245 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2246 txr->me, (long)txr->total_packets.ev_count);
2247 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2248 txr->me, (long)txr->no_desc_avail.ev_count);
2249 }
2250
2251 device_printf(dev,"MBX IRQ Handled: %lu\n",
2252 (long)adapter->link_irq.ev_count);
2253 return;
2254 }
2255
2256 static int
2257 ixv_sysctl_debug(SYSCTLFN_ARGS)
2258 {
2259 struct sysctlnode node;
2260 int error, result;
2261 struct adapter *adapter;
2262
2263 node = *rnode;
2264 adapter = (struct adapter *)node.sysctl_data;
2265 node.sysctl_data = &result;
2266 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2267
2268 if (error)
2269 return error;
2270
2271 if (result == 1)
2272 ixv_print_debug_info(adapter);
2273
2274 return 0;
2275 }
2276
2277 const struct sysctlnode *
2278 ixv_sysctl_instance(struct adapter *adapter)
2279 {
2280 const char *dvname;
2281 struct sysctllog **log;
2282 int rc;
2283 const struct sysctlnode *rnode;
2284
2285 log = &adapter->sysctllog;
2286 dvname = device_xname(adapter->dev);
2287
2288 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2289 0, CTLTYPE_NODE, dvname,
2290 SYSCTL_DESCR("ixv information and settings"),
2291 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2292 goto err;
2293
2294 return rnode;
2295 err:
2296 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2297 return NULL;
2298 }
2299
2300 static void
2301 ixv_add_device_sysctls(struct adapter *adapter)
2302 {
2303 struct sysctllog **log;
2304 const struct sysctlnode *rnode, *cnode;
2305 device_t dev;
2306
2307 dev = adapter->dev;
2308 log = &adapter->sysctllog;
2309
2310 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2311 aprint_error_dev(dev, "could not create sysctl root\n");
2312 return;
2313 }
2314
2315 if (sysctl_createv(log, 0, &rnode, &cnode,
2316 CTLFLAG_READWRITE, CTLTYPE_INT,
2317 "debug", SYSCTL_DESCR("Debug Info"),
2318 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2319 aprint_error_dev(dev, "could not create sysctl\n");
2320
2321 if (sysctl_createv(log, 0, &rnode, &cnode,
2322 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2323 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2324 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2325 aprint_error_dev(dev, "could not create sysctl\n");
2326 }
2327
/*
 * Add statistic sysctls for the VF.
 *
 * Attaches evcnt(9) counters for driver-level statistics, per-queue
 * TX/RX statistics, and the VF MAC statistic registers.  Also creates
 * a per-queue sysctl node ("qN") under the per-device root.  All
 * nodes go through adapter->sysctllog so they can be torn down in
 * one sysctl_teardown() call on detach.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	/* txr/rxr advance in lockstep with the queue index below */
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSIX IRQ Handled");

	/* Per-queue statistics and sysctl nodes */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		/* Re-lookup the root node each iteration */
		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* "qN" node; rnode is intentionally re-pointed at it */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/*
		 * NOTE(review): FreeBSD-style SYSCTL_ADD_INT and the
		 * ctx/queue_list identifiers are not defined in this
		 * NetBSD file; this branch looks like it would not
		 * compile with LRO enabled -- confirm.
		 */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get the own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");
}
2501
2502 static void
2503 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2504 const char *description, int *limit, int value)
2505 {
2506 device_t dev = adapter->dev;
2507 struct sysctllog **log;
2508 const struct sysctlnode *rnode, *cnode;
2509
2510 log = &adapter->sysctllog;
2511 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2512 aprint_error_dev(dev, "could not create sysctl root\n");
2513 return;
2514 }
2515 if (sysctl_createv(log, 0, &rnode, &cnode,
2516 CTLFLAG_READWRITE, CTLTYPE_INT,
2517 name, SYSCTL_DESCR(description),
2518 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2519 aprint_error_dev(dev, "could not create sysctl\n");
2520 *limit = value;
2521 }
2522