/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 285590 2015-07-15 00:35:50Z pkelsey $*/
34 /*$NetBSD: ixv.c,v 1.24 2016/12/02 10:24:31 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.4.0";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* All VF variants share string index 0, the only branding string. */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
74
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* NetBSD autoconf(9) entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
/* ifnet(9) entry points and init/stop helpers */
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
/* PCI/MSI-X resource management */
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);

/* Interrupt and filter management */
static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Statistics bookkeeping */
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);

/* The MSI/X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *);
static void	ixv_handle_mbx(void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
135
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* NetBSD autoconf attachment; DVF_DETACH_SHUTDOWN lets detach run at shutdown. */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * The FreeBSD newbus glue below is intentionally disabled; it is kept
 * only to ease diffing against the upstream FreeBSD driver.
 */
# if 0
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};
#endif

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* XXX depend on 'ix' ? */
#endif
166
/*
**	TUNEABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
/*
 * NetBSD has no FreeBSD tunable framework, so TUNABLE_INT expands to
 * nothing; the TUNABLE_INT() uses below are no-ops kept to minimize
 * the diff against the FreeBSD driver this file derives from.
 */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
** NOTE(review): this is a single global shared by all instances, as
** in the FreeBSD original.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
211
212 /*********************************************************************
213 * Device identification routine
214 *
215 * ixv_probe determines if the driver should be loaded on
216 * adapter based on PCI vendor/device id of the adapter.
217 *
218 * return 1 on success, 0 on failure
219 *********************************************************************/
220
221 static int
222 ixv_probe(device_t dev, cfdata_t cf, void *aux)
223 {
224 #ifdef __HAVE_PCI_MSI_MSIX
225 const struct pci_attach_args *pa = aux;
226
227 return (ixv_lookup(pa) != NULL) ? 1 : 0;
228 #else
229 return 0;
230 #endif
231 }
232
233 static ixgbe_vendor_info_t *
234 ixv_lookup(const struct pci_attach_args *pa)
235 {
236 pcireg_t subid;
237 ixgbe_vendor_info_t *ent;
238
239 INIT_DEBUGOUT("ixv_probe: begin");
240
241 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
242 return NULL;
243
244 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
245
246 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
247 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
248 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
249
250 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
251 (ent->subvendor_id == 0)) &&
252
253 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
254 (ent->subdevice_id == 0))) {
255 return ent;
256 }
257 }
258 return NULL;
259 }
260
261
/*
 * Create the per-device sysctl nodes: a "debug" knob wired to
 * ixv_sysctl_debug() and an "enable_aim" knob.  Errors are reported
 * but not fatal; the device works without sysctls.
 */
static void
ixv_sysctl_attach(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
292
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* Probe already matched, so the lookup cannot fail here. */
	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* SYSCTL APIs */
	ixv_sysctl_attach(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	/* Ring byte size must be DBA_ALIGN-aligned and count within range. */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: its
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		aprint_error_dev(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixv_init_mbx_params_vf(hw);

	ixgbe_reset_hw(hw);

	/* Get the Mailbox API version */
	device_printf(dev, "MBX API %d negotiation: %d\n",
	    ixgbe_mbox_api_11,
	    ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));

	error = ixgbe_init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter, pa);
	if (error)
		goto err_late;

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_fast64();

		memcpy(addr, &rndval, sizeof(addr));
		/* Clear multicast bit, set locally-administered bit. */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attached so detach knows there is something to tear down. */
	adapter->osdep.attached = true;
	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return;

}
441
442 /*********************************************************************
443 * Device removal routine
444 *
445 * The detach entry point is called when the driver is being removed.
446 * This routine stops the adapter and deallocates all the resources
447 * that were allocated for driver operation.
448 *
449 * return 0 on success, positive on failure
450 *********************************************************************/
451
452 static int
453 ixv_detach(device_t dev, int flags)
454 {
455 struct adapter *adapter = device_private(dev);
456 struct ix_queue *que = adapter->queues;
457
458 INIT_DEBUGOUT("ixv_detach: begin");
459 if (adapter->osdep.attached == false)
460 return 0;
461
462 #if NVLAN > 0
463 /* Make sure VLANS are not using driver */
464 if (!VLAN_ATTACHED(&adapter->osdep.ec))
465 ; /* nothing to do: no VLANs */
466 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
467 vlan_ifdetach(adapter->ifp);
468 else {
469 aprint_error_dev(dev, "VLANs in use\n");
470 return EBUSY;
471 }
472 #endif
473
474 IXGBE_CORE_LOCK(adapter);
475 ixv_stop(adapter);
476 IXGBE_CORE_UNLOCK(adapter);
477
478 for (int i = 0; i < adapter->num_queues; i++, que++) {
479 #ifndef IXGBE_LEGACY_TX
480 softint_disestablish(txr->txq_si);
481 #endif
482 softint_disestablish(que->que_si);
483 }
484
485 /* Drain the Mailbox(link) queue */
486 softint_disestablish(adapter->link_si);
487
488 /* Unregister VLAN events */
489 #if 0 /* XXX msaitoh delete after write? */
490 if (adapter->vlan_attach != NULL)
491 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
492 if (adapter->vlan_detach != NULL)
493 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
494 #endif
495
496 ether_ifdetach(adapter->ifp);
497 callout_halt(&adapter->timer, NULL);
498 ixv_free_pci_resources(adapter);
499 #if 0 /* XXX the NetBSD port is probably missing something here */
500 bus_generic_detach(dev);
501 #endif
502 if_detach(adapter->ifp);
503
504 ixgbe_free_transmit_structures(adapter);
505 ixgbe_free_receive_structures(adapter);
506
507 IXGBE_CORE_LOCK_DESTROY(adapter);
508 return (0);
509 }
510
/*********************************************************************
 *
 *  Shutdown entry point
 *
 *  Disabled: NetBSD uses DVF_DETACH_SHUTDOWN on the cfattach instead.
 *
 **********************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif
527
528 static int
529 ixv_ifflags_cb(struct ethercom *ec)
530 {
531 struct ifnet *ifp = &ec->ec_if;
532 struct adapter *adapter = ifp->if_softc;
533 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
534
535 IXGBE_CORE_LOCK(adapter);
536
537 if (change != 0)
538 adapter->if_flags = ifp->if_flags;
539
540 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
541 rc = ENETRESET;
542
543 IXGBE_CORE_UNLOCK(adapter);
544
545 return rc;
546 }
547
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = (struct ifreq *) data;
	int             error = 0;
	int l4csum_en;
	/* The L4 Rx checksum capabilities that must toggle as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch is purely diagnostic logging. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	/* Second switch does the actual work. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code handle it first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply the change only if the interface runs. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}
630
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Caller must hold the core lock.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset the VF before touching TX/RX unit registers. */
	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Restart the watchdog/stats timer (1 Hz). */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
754
755 static int
756 ixv_init(struct ifnet *ifp)
757 {
758 struct adapter *adapter = ifp->if_softc;
759
760 IXGBE_CORE_LOCK(adapter);
761 ixv_init_locked(adapter);
762 IXGBE_CORE_UNLOCK(adapter);
763 return 0;
764 }
765
766
767 /*
768 **
769 ** MSIX Interrupt Handlers and Tasklets
770 **
771 */
772
773 static inline void
774 ixv_enable_queue(struct adapter *adapter, u32 vector)
775 {
776 struct ixgbe_hw *hw = &adapter->hw;
777 u32 queue = 1 << vector;
778 u32 mask;
779
780 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
781 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
782 }
783
784 static inline void
785 ixv_disable_queue(struct adapter *adapter, u32 vector)
786 {
787 struct ixgbe_hw *hw = &adapter->hw;
788 u64 queue = (u64)(1 << vector);
789 u32 mask;
790
791 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
792 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
793 }
794
795 static inline void
796 ixv_rearm_queues(struct adapter *adapter, u64 queues)
797 {
798 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
799 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
800 }
801
802
/*
 * Deferred (softint) queue service: clean RX and TX, restart the
 * transmit path if packets are pending, and either reschedule
 * ourselves (more RX work) or re-enable the queue interrupt.
 */
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* More RX pending: count it and run again later. */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
835
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 *  Hard interrupt for one RX/TX queue pair: masks the queue, cleans
 *  both rings, optionally recomputes the interrupt moderation value
 *  (AIM), and defers remaining RX work to the softint.
 *
 *  Note: declared static in the prototype above; the definition
 *  retains internal linkage per C linkage rules.
 *
 **********************************************************************/
int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask this vector until the deferred work completes. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Estimate average packet size in this interval. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Duplicate the value into both EITR interval fields. */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return 1;
}
928
/*
 * MSI-X mailbox/link interrupt: read and acknowledge the cause, defer
 * link-status processing to the link softint, and re-enable the
 * "other" interrupt.
 */
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->link_irq.ev_count;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return 1;
}
950
951 /*********************************************************************
952 *
953 * Media Ioctl callback
954 *
955 * This routine is called whenever the user queries the status of
956 * the interface using ifconfig.
957 *
958 **********************************************************************/
959 static void
960 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
961 {
962 struct adapter *adapter = ifp->if_softc;
963
964 INIT_DEBUGOUT("ixv_media_status: begin");
965 IXGBE_CORE_LOCK(adapter);
966 ixv_update_link_status(adapter);
967
968 ifmr->ifm_status = IFM_AVALID;
969 ifmr->ifm_active = IFM_ETHER;
970
971 if (!adapter->link_active) {
972 IXGBE_CORE_UNLOCK(adapter);
973 return;
974 }
975
976 ifmr->ifm_status |= IFM_ACTIVE;
977
978 switch (adapter->link_speed) {
979 case IXGBE_LINK_SPEED_1GB_FULL:
980 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
981 break;
982 case IXGBE_LINK_SPEED_10GB_FULL:
983 ifmr->ifm_active |= IFM_FDX;
984 break;
985 }
986
987 IXGBE_CORE_UNLOCK(adapter);
988
989 return;
990 }
991
992 /*********************************************************************
993 *
994 * Media Ioctl callback
995 *
996 * This routine is called when the user changes speed/duplex using
997 * media/mediopt option with ifconfig.
998 *
999 **********************************************************************/
1000 static int
1001 ixv_media_change(struct ifnet * ifp)
1002 {
1003 struct adapter *adapter = ifp->if_softc;
1004 struct ifmedia *ifm = &adapter->media;
1005
1006 INIT_DEBUGOUT("ixv_media_change: begin");
1007
1008 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1009 return (EINVAL);
1010
1011 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1012 case IFM_AUTO:
1013 break;
1014 default:
1015 device_printf(adapter->dev, "Only auto media type\n");
1016 return (EINVAL);
1017 }
1018
1019 return (0);
1020 }
1021
1022
1023 /*********************************************************************
1024 * Multicast Update
1025 *
1026 * This routine is called whenever multicast address list is updated.
1027 *
1028 **********************************************************************/
1029 #define IXGBE_RAR_ENTRIES 16
1030
1031 static void
1032 ixv_set_multi(struct adapter *adapter)
1033 {
1034 struct ether_multi *enm;
1035 struct ether_multistep step;
1036 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1037 u8 *update_ptr;
1038 int mcnt = 0;
1039 struct ethercom *ec = &adapter->osdep.ec;
1040
1041 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1042
1043 ETHER_FIRST_MULTI(step, ec, enm);
1044 while (enm != NULL) {
1045 bcopy(enm->enm_addrlo,
1046 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1047 IXGBE_ETH_LENGTH_OF_ADDRESS);
1048 mcnt++;
1049 /* XXX This might be required --msaitoh */
1050 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1051 break;
1052 ETHER_NEXT_MULTI(step, enm);
1053 }
1054
1055 update_ptr = mta;
1056
1057 ixgbe_update_mc_addr_list(&adapter->hw,
1058 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1059
1060 return;
1061 }
1062
1063 /*
1064 * This is an iterator function now needed by the multicast
1065 * shared code. It simply feeds the shared code routine the
1066 * addresses in the array of ixv_set_multi() one by one.
1067 */
1068 static u8 *
1069 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1070 {
1071 u8 *addr = *update_ptr;
1072 u8 *newptr;
1073 *vmdq = 0;
1074
1075 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1076 *update_ptr = newptr;
1077 return addr;
1078 }
1079
1080 /*********************************************************************
1081 * Timer routine
1082 *
1083 * This routine checks for link status,updates statistics,
1084 * and runs the watchdog check.
1085 *
1086 **********************************************************************/
1087
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Take the core lock and defer all work to the locked variant. */
	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1097
/*
 * Once-a-second timer body (core lock held).
 *
 * Refreshes link state and statistics, tracks per-queue TX progress,
 * and resets the adapter if every queue appears hung.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending TX work */
	int hung = 0;		/* count of queues declared hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
}
1163
1164 /*
1165 ** Note: this routine updates the OS on the link state
1166 ** the real check of the hardware only happens with
1167 ** a link interrupt.
1168 */
1169 static void
1170 ixv_update_link_status(struct adapter *adapter)
1171 {
1172 struct ifnet *ifp = adapter->ifp;
1173 device_t dev = adapter->dev;
1174
1175 if (adapter->link_up){
1176 if (adapter->link_active == FALSE) {
1177 if (bootverbose)
1178 device_printf(dev,"Link is up %d Gbps %s \n",
1179 ((adapter->link_speed == 128)? 10:1),
1180 "Full Duplex");
1181 adapter->link_active = TRUE;
1182 if_link_state_change(ifp, LINK_STATE_UP);
1183 }
1184 } else { /* Link down */
1185 if (adapter->link_active == TRUE) {
1186 if (bootverbose)
1187 device_printf(dev,"Link is Down\n");
1188 if_link_state_change(ifp, LINK_STATE_DOWN);
1189 adapter->link_active = FALSE;
1190 }
1191 }
1192
1193 return;
1194 }
1195
1196
1197 static void
1198 ixv_ifstop(struct ifnet *ifp, int disable)
1199 {
1200 struct adapter *adapter = ifp->if_softc;
1201
1202 IXGBE_CORE_LOCK(adapter);
1203 ixv_stop(adapter);
1204 IXGBE_CORE_UNLOCK(adapter);
1205 }
1206
1207 /*********************************************************************
1208 *
1209 * This routine disables all traffic on the adapter by issuing a
1210 * global reset on the MAC and deallocates TX/RX buffers.
1211 *
1212 **********************************************************************/
1213
/*
 * Quiesce the adapter: mask interrupts, reset the hardware, and
 * stop the timer.  Caller must hold the core lock.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	 * Clear the stopped flag first so the ixgbe_stop_adapter() call
	 * below actually performs the stop rather than short-circuiting.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1240
1241
1242 /*********************************************************************
1243 *
1244 * Determine hardware revision.
1245 *
1246 **********************************************************************/
1247 static void
1248 ixv_identify_hardware(struct adapter *adapter)
1249 {
1250 pcitag_t tag;
1251 pci_chipset_tag_t pc;
1252 pcireg_t subid, id;
1253 struct ixgbe_hw *hw = &adapter->hw;
1254
1255 pc = adapter->osdep.pc;
1256 tag = adapter->osdep.tag;
1257
1258 /*
1259 ** Make sure BUSMASTER is set, on a VM under
1260 ** KVM it may not be and will break things.
1261 */
1262 ixgbe_pci_enable_busmaster(pc, tag);
1263
1264 id = pci_conf_read(pc, tag, PCI_ID_REG);
1265 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1266
1267 /* Save off the information about this board */
1268 hw->vendor_id = PCI_VENDOR(id);
1269 hw->device_id = PCI_PRODUCT(id);
1270 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1271 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1272 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1273
1274 /* We need this to determine device-specific things */
1275 ixgbe_set_mac_type(hw);
1276
1277 /* Set the right number of segments */
1278 adapter->num_segs = IXGBE_82599_SCATTER;
1279
1280 return;
1281 }
1282
1283 /*********************************************************************
1284 *
1285 * Setup MSIX Interrupt resources and handlers
1286 *
1287 **********************************************************************/
1288 static int
1289 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1290 {
1291 device_t dev = adapter->dev;
1292 struct ix_queue *que = adapter->queues;
1293 struct tx_ring *txr = adapter->tx_rings;
1294 int error, rid, vector = 0;
1295 pci_chipset_tag_t pc;
1296 pcitag_t tag;
1297 char intrbuf[PCI_INTRSTR_LEN];
1298 const char *intrstr = NULL;
1299 kcpuset_t *affinity;
1300 int cpu_id = 0;
1301
1302 pc = adapter->osdep.pc;
1303 tag = adapter->osdep.tag;
1304
1305 if (pci_msix_alloc_exact(pa,
1306 &adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
1307 return (ENXIO);
1308
1309 kcpuset_create(&affinity, false);
1310 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1311 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1312 sizeof(intrbuf));
1313 #ifdef IXV_MPSAFE
1314 pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1315 true);
1316 #endif
1317 /* Set the handler function */
1318 adapter->osdep.ihs[i] = pci_intr_establish(pc,
1319 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
1320 if (adapter->osdep.ihs[i] == NULL) {
1321 que->res = NULL;
1322 aprint_error_dev(dev,
1323 "Failed to register QUE handler");
1324 kcpuset_destroy(affinity);
1325 return (ENXIO);
1326 }
1327 que->msix = vector;
1328 adapter->active_queues |= (u64)(1 << que->msix);
1329
1330 cpu_id = i;
1331 /* Round-robin affinity */
1332 kcpuset_zero(affinity);
1333 kcpuset_set(affinity, cpu_id % ncpu);
1334 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1335 NULL);
1336 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1337 intrstr);
1338 if (error == 0)
1339 aprint_normal(", bound queue %d to cpu %d\n",
1340 i, cpu_id);
1341 else
1342 aprint_normal("\n");
1343
1344 #ifndef IXGBE_LEGACY_TX
1345 txr->txq_si = softint_establish(SOFTINT_NET,
1346 ixgbe_deferred_mq_start, txr);
1347 #endif
1348 que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1349 que);
1350 if (que->que_si == NULL) {
1351 aprint_error_dev(dev,
1352 "could not establish software interrupt\n");
1353 }
1354 }
1355
1356 /* and Mailbox */
1357 cpu_id++;
1358 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1359 sizeof(intrbuf));
1360 #ifdef IXG_MPSAFE
1361 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
1362 #endif
1363 /* Set the mbx handler function */
1364 adapter->osdep.ihs[vector] = pci_intr_establish(pc,
1365 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
1366 if (adapter->osdep.ihs[vector] == NULL) {
1367 adapter->res = NULL;
1368 aprint_error_dev(dev, "Failed to register LINK handler\n");
1369 kcpuset_destroy(affinity);
1370 return (ENXIO);
1371 }
1372 /* Round-robin affinity */
1373 kcpuset_zero(affinity);
1374 kcpuset_set(affinity, cpu_id % ncpu);
1375 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1376
1377 aprint_normal_dev(dev,
1378 "for link, interrupting at %s, ", intrstr);
1379 if (error == 0) {
1380 aprint_normal("affinity to cpu %d\n", cpu_id);
1381 }
1382 adapter->vector = vector;
1383 /* Tasklets for Mailbox */
1384 adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1385 adapter);
1386 /*
1387 ** Due to a broken design QEMU will fail to properly
1388 ** enable the guest for MSIX unless the vectors in
1389 ** the table are all set up, so we must rewrite the
1390 ** ENABLE in the MSIX control register again at this
1391 ** point to cause it to successfully initialize us.
1392 */
1393 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1394 int msix_ctrl;
1395 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1396 rid += PCI_MSIX_CTL;
1397 msix_ctrl = pci_conf_read(pc, tag, rid);
1398 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1399 pci_conf_write(pc, tag, rid, msix_ctrl);
1400 }
1401
1402 return (0);
1403 }
1404
1405 /*
1406 * Setup MSIX resources, note that the VF
1407 * device MUST use MSIX, there is no fallback.
1408 */
1409 static int
1410 ixv_setup_msix(struct adapter *adapter)
1411 {
1412 device_t dev = adapter->dev;
1413 int want, msgs;
1414
1415 /*
1416 ** Want two vectors: one for a queue,
1417 ** plus an additional for mailbox.
1418 */
1419 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1420 if (msgs < IXG_MSIX_NINTR) {
1421 aprint_error_dev(dev,"MSIX config error\n");
1422 return (ENXIO);
1423 }
1424 want = MIN(msgs, IXG_MSIX_NINTR);
1425
1426 adapter->msix_mem = (void *)1; /* XXX */
1427 aprint_normal_dev(dev,
1428 "Using MSIX interrupts with %d vectors\n", msgs);
1429 return (want);
1430 }
1431
1432
/*
 * Map BAR0 and configure MSI-X.
 *
 * Returns 0 on success, ENXIO if the BAR cannot be mapped or MSI-X
 * setup fails.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));

	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 signals "not mapped" to the free path */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	/*
	 * NOTE(review): ixv_setup_msix() returns a vector count on success
	 * and ENXIO (6) on failure; this comparison relies on the vector
	 * count never equalling the value of ENXIO — confirm.
	 */
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1485
1486 static void
1487 ixv_free_pci_resources(struct adapter * adapter)
1488 {
1489 struct ix_queue *que = adapter->queues;
1490 int rid;
1491
1492 /*
1493 ** Release all msix queue resources:
1494 */
1495 for (int i = 0; i < adapter->num_queues; i++, que++) {
1496 rid = que->msix + 1;
1497 if (que->res != NULL)
1498 pci_intr_disestablish(adapter->osdep.pc,
1499 adapter->osdep.ihs[i]);
1500 }
1501
1502
1503 /* Clean the Legacy or Link interrupt last */
1504 if (adapter->vector) /* we are doing MSIX */
1505 rid = adapter->vector + 1;
1506 else
1507 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1508
1509 if (adapter->osdep.ihs[rid] != NULL)
1510 pci_intr_disestablish(adapter->osdep.pc,
1511 adapter->osdep.ihs[rid]);
1512 adapter->osdep.ihs[rid] = NULL;
1513
1514 #if defined(NETBSD_MSI_OR_MSIX)
1515 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1516 adapter->osdep.nintrs);
1517 #endif
1518
1519 if (adapter->osdep.mem_size != 0) {
1520 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1521 adapter->osdep.mem_bus_space_handle,
1522 adapter->osdep.mem_size);
1523 }
1524
1525 return;
1526 }
1527
1528 /*********************************************************************
1529 *
1530 * Setup networking device structure and register an interface.
1531 *
1532 **********************************************************************/
/*
 * Fill in the ifnet/ethercom structures, advertise capabilities,
 * and attach the interface and its media types.
 */
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* The ifnet lives inside the ethercom; wire up our callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	/* Leave two descriptors of slack in the software send queue. */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	if_attach(ifp);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise checksum offload and TSOv4, but leave them disabled. */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_MTU;
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1606
1607 static void
1608 ixv_config_link(struct adapter *adapter)
1609 {
1610 struct ixgbe_hw *hw = &adapter->hw;
1611 u32 autoneg, err = 0;
1612
1613 if (hw->mac.ops.check_link)
1614 err = hw->mac.ops.check_link(hw, &autoneg,
1615 &adapter->link_up, FALSE);
1616 if (err)
1617 goto out;
1618
1619 if (hw->mac.ops.setup_link)
1620 err = hw->mac.ops.setup_link(hw,
1621 autoneg, adapter->link_up);
1622 out:
1623 return;
1624 }
1625
1626
1627 /*********************************************************************
1628 *
1629 * Enable transmit unit.
1630 *
1631 **********************************************************************/
/*
 * Program the VF transmit registers for every TX ring and enable
 * the queues.
 */
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set the processing limit */
		txr->process_limit = ixv_tx_process_limit;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writebacks */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1677
1678
1679 /*********************************************************************
1680 *
1681 * Setup receive registers and features.
1682 *
1683 **********************************************************************/
1684 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1685
/*
 * Program the VF receive registers for every RX ring: buffer sizes,
 * descriptor ring base/length, SRRCTL, and queue enable, then the
 * checksum configuration.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;
	int		max_frame;

	/* Pick the receive buffer size based on the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our expected packet-size */
	max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
	ixgbevf_rlpml_set_vf(hw, max_frame);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Set the processing limit */
		rxr->process_limit = ixv_rx_process_limit;

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1782
/*
 * Re-enable VLAN stripping on the queues and repopulate the VFTA
 * from the driver's shadow copy after a soft reset.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for ( int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* The mailbox can be busy; retry up to 5 times. */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1832
1833 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1834 /*
1835 ** This routine is run via an vlan config EVENT,
1836 ** it enables us to use the HW Filter table since
1837 ** we can get the vlan id. This just creates the
1838 ** entry in the soft version of the VFTA, init will
1839 ** repopulate the real table.
1840 */
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Set the bit for this VLAN id in the 32-bit-word shadow table. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1861
1862 /*
1863 ** This routine is run via an vlan
1864 ** unconfig EVENT, remove our entry
1865 ** in the soft vfta.
1866 */
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the bit for this VLAN id in the 32-bit-word shadow table. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1887 #endif
1888
/*
 * Unmask all VF interrupt causes, set the auto-clear mask for the
 * queue interrupts, and enable each queue's vector.
 */
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Auto-clear everything except the "other"/link causes. */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
1910
1911 static void
1912 ixv_disable_intr(struct adapter *adapter)
1913 {
1914 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1915 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1916 IXGBE_WRITE_FLUSH(&adapter->hw);
1917 return;
1918 }
1919
1920 /*
1921 ** Setup the correct IVAR register for a particular MSIX interrupt
1922 ** - entry is the register array entry
1923 ** - vector is the MSIX vector for this queue
1924 ** - type is RX/TX/MISC
1925 */
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the entry valid for the hardware. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* The mailbox/other cause lives in the low byte. */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/* Two entries per register; each entry has an RX byte
		 * (type 0) and a TX byte (type 1). */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
1947
/*
 * Map every queue's RX and TX causes to its MSI-X vector, seed the
 * interrupt throttle rate, and route the mailbox cause.
 */
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct  ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	/* type -1: entry argument is unused by ixv_set_ivar() */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}
1966
1967
1968 /*
1969 ** Tasklet handler for MSIX MBX interrupts
1970 ** - do outside interrupt since it might sleep
1971 */
1972 static void
1973 ixv_handle_mbx(void *context)
1974 {
1975 struct adapter *adapter = context;
1976
1977 ixgbe_check_link(&adapter->hw,
1978 &adapter->link_speed, &adapter->link_up, 0);
1979 ixv_update_link_status(adapter);
1980 }
1981
1982 /*
1983 ** The VF stats registers never have a truely virgin
1984 ** starting point, so this routine tries to make an
1985 ** artificial one, marking ground zero on attach as
1986 ** it were.
1987 */
1988 static void
1989 ixv_save_stats(struct adapter *adapter)
1990 {
1991 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1992
1993 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1994 stats->saved_reset_vfgprc +=
1995 stats->vfgprc.ev_count - stats->base_vfgprc;
1996 stats->saved_reset_vfgptc +=
1997 stats->vfgptc.ev_count - stats->base_vfgptc;
1998 stats->saved_reset_vfgorc +=
1999 stats->vfgorc.ev_count - stats->base_vfgorc;
2000 stats->saved_reset_vfgotc +=
2001 stats->vfgotc.ev_count - stats->base_vfgotc;
2002 stats->saved_reset_vfmprc +=
2003 stats->vfmprc.ev_count - stats->base_vfmprc;
2004 }
2005 }
2006
2007 static void
2008 ixv_init_stats(struct adapter *adapter)
2009 {
2010 struct ixgbe_hw *hw = &adapter->hw;
2011
2012 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2013 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2014 adapter->stats.vf.last_vfgorc |=
2015 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2016
2017 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2018 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2019 adapter->stats.vf.last_vfgotc |=
2020 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2021
2022 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2023
2024 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2025 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2026 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2027 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2028 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2029 }
2030
/*
 * Fold a 32-bit hardware counter into a 64-bit event counter.
 * If the register wrapped since the last read, add 2^32 to the
 * accumulated count; then splice the current low 32 bits in.
 * (Expects a local 'struct ixgbe_hw *hw' in the calling scope.)
 */
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count.ev_count += 0x100000000LL;	\
	last = current;					\
	count.ev_count &= 0xFFFFFFFF00000000LL;		\
	count.ev_count |= current;			\
}

/*
 * As above but for the VF's 36-bit octet counters, which are split
 * across an LSB and an MSB register; wrap adds 2^36.
 */
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count.ev_count += 0x1000000000LL;	\
	last = current;					\
	count.ev_count &= 0xFFFFFFF000000000LL;		\
	count.ev_count |= current;			\
}
2052
2053 /*
2054 ** ixv_update_stats - Update the board statistics counters.
2055 */
/*
** ixv_update_stats - Update the board statistics counters.
**	Reads every VF counter register and folds it into the
**	corresponding event counter, handling hardware wraparound.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}
2072
2073 /*
2074 * Add statistic sysctls for the VF.
2075 */
/*
 * Add statistic sysctls for the VF.
 *
 * Attaches event counters for driver-level statistics, the VF
 * hardware counters, and the first queue's TX/RX rings.  Note that
 * only queue 0 is instrumented here.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver dropped packets");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");

	/* VF hardware counters (mirrored by ixv_update_stats()) */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");
	/* Queue 0 counters */
	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
	    xname, "IRQs on queue");
	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
	    xname, "RX irqs on queue");
	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
	    xname, "RX packets");
	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
	    xname, "RX bytes");
	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
	    xname, "Discarded RX packets");
	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
	    xname, "TX Packets");
	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
	    xname, "# of times not enough descriptors were available during TX");
	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "TX TSO");
}
2123
2124 /**********************************************************************
2125 *
2126 * This routine is called only when em_display_debug_stats is enabled.
2127 * This routine provides a way to take a look at important statistics
2128 * maintained by the driver and hardware.
2129 *
2130 **********************************************************************/
2131 static void
2132 ixv_print_debug_info(struct adapter *adapter)
2133 {
2134 device_t dev = adapter->dev;
2135 struct ixgbe_hw *hw = &adapter->hw;
2136 struct ix_queue *que = adapter->queues;
2137 struct rx_ring *rxr;
2138 struct tx_ring *txr;
2139 #ifdef LRO
2140 struct lro_ctrl *lro;
2141 #endif /* LRO */
2142
2143 device_printf(dev,"Error Byte Count = %u \n",
2144 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2145
2146 for (int i = 0; i < adapter->num_queues; i++, que++) {
2147 txr = que->txr;
2148 rxr = que->rxr;
2149 #ifdef LRO
2150 lro = &rxr->lro;
2151 #endif /* LRO */
2152 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2153 que->msix, (long)que->irqs.ev_count);
2154 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2155 rxr->me, (long long)rxr->rx_packets.ev_count);
2156 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2157 rxr->me, (long)rxr->rx_bytes.ev_count);
2158 #ifdef LRO
2159 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2160 rxr->me, lro->lro_queued);
2161 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2162 rxr->me, lro->lro_flushed);
2163 #endif /* LRO */
2164 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2165 txr->me, (long)txr->total_packets.ev_count);
2166 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2167 txr->me, (long)txr->no_desc_avail.ev_count);
2168 }
2169
2170 device_printf(dev,"MBX IRQ Handled: %lu\n",
2171 (long)adapter->link_irq.ev_count);
2172 return;
2173 }
2174
2175 static int
2176 ixv_sysctl_debug(SYSCTLFN_ARGS)
2177 {
2178 struct sysctlnode node;
2179 int error, result;
2180 struct adapter *adapter;
2181
2182 node = *rnode;
2183 adapter = (struct adapter *)node.sysctl_data;
2184 node.sysctl_data = &result;
2185 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2186
2187 if (error)
2188 return error;
2189
2190 if (result == 1)
2191 ixv_print_debug_info(adapter);
2192
2193 return 0;
2194 }
2195
2196 const struct sysctlnode *
2197 ixv_sysctl_instance(struct adapter *adapter)
2198 {
2199 const char *dvname;
2200 struct sysctllog **log;
2201 int rc;
2202 const struct sysctlnode *rnode;
2203
2204 log = &adapter->sysctllog;
2205 dvname = device_xname(adapter->dev);
2206
2207 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2208 0, CTLTYPE_NODE, dvname,
2209 SYSCTL_DESCR("ixv information and settings"),
2210 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2211 goto err;
2212
2213 return rnode;
2214 err:
2215 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2216 return NULL;
2217 }
2218