/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 280197 2015-03-17 22:40:50Z jfv $*/
34 /*$NetBSD: ixv.c,v 1.21 2016/12/01 06:27:18 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
/*********************************************************************
 *  Driver version string, reported at attach time.
 *********************************************************************/
char ixv_driver_version[] = "1.2.5";
46
47 /*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
/*
 * PCI ID match table: every supported 82599/X540/X550 virtual function.
 * Subvendor/subdevice of 0 act as wildcards in ixv_lookup().
 * The all-zero sentinel entry terminates the table.
 */
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
67
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
71
/* Branding strings, indexed by the last field of ixv_vendor_info_array. */
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
75
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Autoconfiguration entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
/* Network interface (ifnet) entry points */
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
/* Hardware/PCI setup and teardown helpers */
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);

/* Interrupt management, multicast, link and sysctl helpers */
static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Statistics bookkeeping */
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);

/* The MSI/X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *);
static void	ixv_handle_mbx(void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
135
/*********************************************************************
 *  Device Interface Entry Points
 *********************************************************************/

/* NetBSD autoconfiguration glue: wires probe/attach/detach into autoconf. */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus method table, retained for reference only (not compiled). */
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};
#endif

#if 0
/* FreeBSD driver/module registration, likewise disabled on NetBSD. */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
165
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixv_enable_aim = FALSE;
/*
 * NetBSD has no FreeBSD-style loader tunables, so TUNABLE_INT()
 * is defined away and the declarations below compile to nothing.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
 * Shadow VFTA table: needed because the real filter table gets
 * cleared during a soft reset and we must repopulate it afterwards.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/* Keep a running tab of attached ports for sanity checking. */
static int ixv_total_ports;
209
210 /*********************************************************************
211 * Device identification routine
212 *
213 * ixv_probe determines if the driver should be loaded on
214 * adapter based on PCI vendor/device id of the adapter.
215 *
216 * return 1 on success, 0 on failure
217 *********************************************************************/
218
219 static int
220 ixv_probe(device_t dev, cfdata_t cf, void *aux)
221 {
222 #ifdef __HAVE_PCI_MSI_MSIX
223 const struct pci_attach_args *pa = aux;
224
225 return (ixv_lookup(pa) != NULL) ? 1 : 0;
226 #else
227 return 0;
228 #endif
229 }
230
231 static ixgbe_vendor_info_t *
232 ixv_lookup(const struct pci_attach_args *pa)
233 {
234 pcireg_t subid;
235 ixgbe_vendor_info_t *ent;
236
237 INIT_DEBUGOUT("ixv_probe: begin");
238
239 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
240 return NULL;
241
242 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
243
244 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
245 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
246 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
247
248 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
249 (ent->subvendor_id == 0)) &&
250
251 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
252 (ent->subdevice_id == 0))) {
253 ++ixv_total_ports;
254 return ent;
255 }
256 }
257 return NULL;
258 }
259
260
/*
 * Create the per-instance sysctl nodes: a "debug" knob routed through
 * ixv_sysctl_debug() and an "enable_aim" toggle.  Failures are logged
 * but non-fatal; the driver works without its sysctls.
 */
static void
ixv_sysctl_attach(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* XXX This is an *instance* sysctl controlling a *global* variable:
	 * XXX toggling it on one VF toggles AIM for every ixv instance.
	 * XXX It's that way in the FreeBSD driver that this derives from.
	 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
291
292 /*********************************************************************
293 * Device initialization routine
294 *
295 * The attach entry point is called when the driver is being loaded.
296 * This routine identifies the type of hardware, allocates all resources
297 * and initializes the hardware.
298 *
299 * return 0 on success, positive on failure
300 *********************************************************************/
301
/*
 * Attach entry point: identify the hardware, allocate all resources
 * (locks, PCI mappings, descriptor rings, MSI-X vectors), initialize
 * the shared code and the mailbox to the PF, and register the network
 * interface.  Errors unwind through err_late/err_out and leave
 * osdep.attached false so ixv_detach() becomes a no-op.
 */
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* Cannot fail: ixv_probe() already matched this device. */
	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* SYSCTL APIs */
	ixv_sysctl_attach(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Descriptor count sanity checks: counts must keep the rings
	 * DBA_ALIGN-aligned and within MIN/MAX bounds; otherwise fall
	 * back to the compiled-in defaults.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	 * Initialize the shared code: at this point
	 * the mac type is set.
	 */
	error = ixgbe_init_shared_code(hw);
	if (error) {
		aprint_error_dev(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox (VF <-> PF communication channel) */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset, then initialize; init also reads the MAC from the PF. */
	ixgbe_reset_hw(hw);

	error = ixgbe_init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter, pa);
	if (error)
		goto err_late;

	/*
	 * If the PF did not assign a MAC address, make a random,
	 * locally-administered (bit 1 set), unicast (bit 0 clear) one.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_fast64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attach complete so ixv_detach() will do real work. */
	adapter->osdep.attached = true;
	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return;

}
435
436 /*********************************************************************
437 * Device removal routine
438 *
439 * The detach entry point is called when the driver is being removed.
440 * This routine stops the adapter and deallocates all the resources
441 * that were allocated for driver operation.
442 *
443 * return 0 on success, positive on failure
444 *********************************************************************/
445
446 static int
447 ixv_detach(device_t dev, int flags)
448 {
449 struct adapter *adapter = device_private(dev);
450 struct ix_queue *que = adapter->queues;
451
452 INIT_DEBUGOUT("ixv_detach: begin");
453 if (adapter->osdep.attached == false)
454 return 0;
455
456 #if NVLAN > 0
457 /* Make sure VLANS are not using driver */
458 if (!VLAN_ATTACHED(&adapter->osdep.ec))
459 ; /* nothing to do: no VLANs */
460 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
461 vlan_ifdetach(adapter->ifp);
462 else {
463 aprint_error_dev(dev, "VLANs in use\n");
464 return EBUSY;
465 }
466 #endif
467
468 IXGBE_CORE_LOCK(adapter);
469 ixv_stop(adapter);
470 IXGBE_CORE_UNLOCK(adapter);
471
472 for (int i = 0; i < adapter->num_queues; i++, que++) {
473 #ifndef IXGBE_LEGACY_TX
474 softint_disestablish(txr->txq_si);
475 #endif
476 softint_disestablish(que->que_si);
477 }
478
479 /* Drain the Mailbox(link) queue */
480 softint_disestablish(adapter->link_si);
481
482 /* Unregister VLAN events */
483 #if 0 /* XXX msaitoh delete after write? */
484 if (adapter->vlan_attach != NULL)
485 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
486 if (adapter->vlan_detach != NULL)
487 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
488 #endif
489
490 ether_ifdetach(adapter->ifp);
491 callout_halt(&adapter->timer, NULL);
492 ixv_free_pci_resources(adapter);
493 #if 0 /* XXX the NetBSD port is probably missing something here */
494 bus_generic_detach(dev);
495 #endif
496 if_detach(adapter->ifp);
497
498 ixgbe_free_transmit_structures(adapter);
499 ixgbe_free_receive_structures(adapter);
500
501 IXGBE_CORE_LOCK_DESTROY(adapter);
502 return (0);
503 }
504
505 /*********************************************************************
506 *
507 * Shutdown entry point
508 *
509 **********************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* FreeBSD shutdown method: quiesce the adapter under the core lock. */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
#endif
521
522 static int
523 ixv_ifflags_cb(struct ethercom *ec)
524 {
525 struct ifnet *ifp = &ec->ec_if;
526 struct adapter *adapter = ifp->if_softc;
527 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
528
529 IXGBE_CORE_LOCK(adapter);
530
531 if (change != 0)
532 adapter->if_flags = ifp->if_flags;
533
534 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
535 rc = ENETRESET;
536
537 IXGBE_CORE_UNLOCK(adapter);
538
539 return rc;
540 }
541
542 /*********************************************************************
543 * Ioctl entry point
544 *
545 * ixv_ioctl is called when the user wants to configure the
546 * interface.
547 *
548 * return 0 on success, positive on failure
549 **********************************************************************/
550
/*
 * Ioctl entry point: configure the interface.  The first switch only
 * emits debug traces; the second dispatches.  Media ioctls go straight
 * to ifmedia; everything else funnels through ether_ioctl(), and an
 * ENETRESET result triggers the appropriate (re)programming while the
 * interface is running.  Returns 0 on success, errno on failure.
 */
static int
ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	int l4csum_en;
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	    IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* Trace-only switch: no side effects. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		break;
	}

	/* Dispatch switch. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit: reject partial enables.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: only act if the interface is up and running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
}
624
625 /*********************************************************************
626 * Init entry point
627 *
628 * This routine is used in two ways. It is used by the stack as
629 * init entry point in network interface structure. It is also used
630 * by the driver as a hw/sw initialization routine to get to a
631 * consistent state.
632 *
633 * return 0 on success, positive on failure
634 **********************************************************************/
/* Shift for the max-frame-size field of the MHADD register. */
#define IXGBE_MHADD_MFS_SHIFT 16

/*
 * Bring the hardware and driver state to a consistent running state.
 * Used both as the stack's init entry point (via ixv_init) and
 * internally after configuration changes.  Caller must hold the
 * core mutex.  On failure of ring setup the adapter is stopped.
 */
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset before programming the TX side. */
	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set MTU size (only needs programming above the default) */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Arm the watchdog/stats timer (1 second period). */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
748
749 static int
750 ixv_init(struct ifnet *ifp)
751 {
752 struct adapter *adapter = ifp->if_softc;
753
754 IXGBE_CORE_LOCK(adapter);
755 ixv_init_locked(adapter);
756 IXGBE_CORE_UNLOCK(adapter);
757 return 0;
758 }
759
760
761 /*
762 **
763 ** MSIX Interrupt Handlers and Tasklets
764 **
765 */
766
767 static inline void
768 ixv_enable_queue(struct adapter *adapter, u32 vector)
769 {
770 struct ixgbe_hw *hw = &adapter->hw;
771 u32 queue = 1 << vector;
772 u32 mask;
773
774 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
775 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
776 }
777
778 static inline void
779 ixv_disable_queue(struct adapter *adapter, u32 vector)
780 {
781 struct ixgbe_hw *hw = &adapter->hw;
782 u64 queue = (u64)(1 << vector);
783 u32 mask;
784
785 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
786 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
787 }
788
789 static inline void
790 ixv_rearm_queues(struct adapter *adapter, u64 queues)
791 {
792 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
794 }
795
796
/*
 * Deferred (softint) per-queue service routine: clean RX and TX, kick
 * any pending transmits, and either reschedule itself (more RX work
 * pending) or re-enable the queue's hardware interrupt.
 */
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		/* RX budget exhausted: reschedule instead of re-enabling. */
		if (more) {
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
829
830 /*********************************************************************
831 *
832 * MSI Queue Interrupt Service routine
833 *
834 **********************************************************************/
/*
 * MSI-X per-queue interrupt handler: masks the queue, cleans RX/TX,
 * optionally recomputes the interrupt throttle rate (AIM), then either
 * defers remaining work to the softint or re-enables the interrupt.
 * Always returns 1 (interrupt claimed).
 */
int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this queue's interrupt until processing is done. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	 * Make certain that if the stack
	 * has anything queued the task gets
	 * scheduled to handle it.
	 */
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Estimate ITR from average packet size in the last interval. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Duplicate the value into both halves of the register. */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return 1;
}
922
923 static int
924 ixv_msix_mbx(void *arg)
925 {
926 struct adapter *adapter = arg;
927 struct ixgbe_hw *hw = &adapter->hw;
928 u32 reg;
929
930 ++adapter->vector_irq.ev_count;
931
932 /* First get the cause */
933 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
934 /* Clear interrupt with write */
935 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
936
937 /* Link status change */
938 if (reg & IXGBE_EICR_LSC)
939 softint_schedule(adapter->link_si);
940
941 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
942 return 1;
943 }
944
945 /*********************************************************************
946 *
947 * Media Ioctl callback
948 *
949 * This routine is called whenever the user queries the status of
950 * the interface using ifconfig.
951 *
952 **********************************************************************/
953 static void
954 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
955 {
956 struct adapter *adapter = ifp->if_softc;
957
958 INIT_DEBUGOUT("ixv_media_status: begin");
959 IXGBE_CORE_LOCK(adapter);
960 ixv_update_link_status(adapter);
961
962 ifmr->ifm_status = IFM_AVALID;
963 ifmr->ifm_active = IFM_ETHER;
964
965 if (!adapter->link_active) {
966 IXGBE_CORE_UNLOCK(adapter);
967 return;
968 }
969
970 ifmr->ifm_status |= IFM_ACTIVE;
971
972 switch (adapter->link_speed) {
973 case IXGBE_LINK_SPEED_1GB_FULL:
974 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
975 break;
976 case IXGBE_LINK_SPEED_10GB_FULL:
977 ifmr->ifm_active |= IFM_FDX;
978 break;
979 }
980
981 IXGBE_CORE_UNLOCK(adapter);
982
983 return;
984 }
985
986 /*********************************************************************
987 *
988 * Media Ioctl callback
989 *
990 * This routine is called when the user changes speed/duplex using
991 * media/mediopt option with ifconfig.
992 *
993 **********************************************************************/
994 static int
995 ixv_media_change(struct ifnet * ifp)
996 {
997 struct adapter *adapter = ifp->if_softc;
998 struct ifmedia *ifm = &adapter->media;
999
1000 INIT_DEBUGOUT("ixv_media_change: begin");
1001
1002 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1003 return (EINVAL);
1004
1005 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1006 case IFM_AUTO:
1007 break;
1008 default:
1009 device_printf(adapter->dev, "Only auto media type\n");
1010 return (EINVAL);
1011 }
1012
1013 return (0);
1014 }
1015
1016
1017 /*********************************************************************
1018 * Multicast Update
1019 *
1020 * This routine is called whenever multicast address list is updated.
1021 *
1022 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

/*
 * Program the hardware multicast filter from the current ethercom
 * multicast list.  The list is flattened into a contiguous array of
 * MAC addresses and handed to the shared code, which pulls entries
 * back out through the ixv_mc_array_itr() cursor.
 *
 * NOTE(review): only enm_addrlo is copied per entry; multicast address
 * ranges are not expanded -- confirm ranges cannot occur here.
 */
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;
	struct ethercom *ec = &adapter->osdep.ec;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/* Flatten the multicast list, capped at the array capacity. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}
1056
1057 /*
1058 * This is an iterator function now needed by the multicast
1059 * shared code. It simply feeds the shared code routine the
1060 * addresses in the array of ixv_set_multi() one by one.
1061 */
1062 static u8 *
1063 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1064 {
1065 u8 *addr = *update_ptr;
1066 u8 *newptr;
1067 *vmdq = 0;
1068
1069 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1070 *update_ptr = newptr;
1071 return addr;
1072 }
1073
1074 /*********************************************************************
1075 * Timer routine
1076 *
1077 * This routine checks for link status,updates statistics,
1078 * and runs the watchdog check.
1079 *
1080 **********************************************************************/
1081
/*
 * Timer body, called once per second with the core lock held (see
 * ixv_local_timer()).  Refreshes link state and statistics, then scans
 * the TX queues for progress: queues with pending work get their
 * interrupt re-armed, and if every queue is hung the interface is
 * reinitialized via the watchdog path.
 */
static void
ixv_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;	/* bitmask of queues with outstanding TX work */
	int hung = 0;	/* how many queues were declared hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	** - mark hung queues so we don't schedule on them
	** - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		 * NOTE(review): the hung test above reads que->busy while
		 * the threshold below marks que->txr->busy; confirm the
		 * two fields are kept in sync by the txeof path.
		 */
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: log it, count it, restart the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
}
1147
/*
 * callout(9) entry point: acquire the core lock and run the real
 * timer body, ixv_local_timer1().
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixv_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1157
1158 /*
1159 ** Note: this routine updates the OS on the link state
1160 ** the real check of the hardware only happens with
1161 ** a link interrupt.
1162 */
1163 static void
1164 ixv_update_link_status(struct adapter *adapter)
1165 {
1166 struct ifnet *ifp = adapter->ifp;
1167 device_t dev = adapter->dev;
1168
1169 if (adapter->link_up){
1170 if (adapter->link_active == FALSE) {
1171 if (bootverbose)
1172 device_printf(dev,"Link is up %d Gbps %s \n",
1173 ((adapter->link_speed == 128)? 10:1),
1174 "Full Duplex");
1175 adapter->link_active = TRUE;
1176 if_link_state_change(ifp, LINK_STATE_UP);
1177 }
1178 } else { /* Link down */
1179 if (adapter->link_active == TRUE) {
1180 if (bootverbose)
1181 device_printf(dev,"Link is Down\n");
1182 if_link_state_change(ifp, LINK_STATE_DOWN);
1183 adapter->link_active = FALSE;
1184 }
1185 }
1186
1187 return;
1188 }
1189
1190
1191 static void
1192 ixv_ifstop(struct ifnet *ifp, int disable)
1193 {
1194 struct adapter *adapter = ifp->if_softc;
1195
1196 IXGBE_CORE_LOCK(adapter);
1197 ixv_stop(adapter);
1198 IXGBE_CORE_UNLOCK(adapter);
1199 }
1200
1201 /*********************************************************************
1202 *
1203 * This routine disables all traffic on the adapter by issuing a
1204 * global reset on the MAC and deallocates TX/RX buffers.
1205 *
1206 **********************************************************************/
1207
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;	/* caller holds the core lock */
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts before touching the hardware */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	 * Clear the shared code's "already stopped" latch so the
	 * ixgbe_stop_adapter() call below actually performs its
	 * queue/teardown work.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* The watchdog timer must not fire while we are down */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1234
1235
1236 /*********************************************************************
1237 *
1238 * Determine hardware revision.
1239 *
1240 **********************************************************************/
1241 static void
1242 ixv_identify_hardware(struct adapter *adapter)
1243 {
1244 pcitag_t tag;
1245 pci_chipset_tag_t pc;
1246 pcireg_t subid, id;
1247 struct ixgbe_hw *hw = &adapter->hw;
1248
1249 pc = adapter->osdep.pc;
1250 tag = adapter->osdep.tag;
1251
1252 /*
1253 ** Make sure BUSMASTER is set, on a VM under
1254 ** KVM it may not be and will break things.
1255 */
1256 ixgbe_pci_enable_busmaster(pc, tag);
1257
1258 id = pci_conf_read(pc, tag, PCI_ID_REG);
1259 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1260
1261 /* Save off the information about this board */
1262 hw->vendor_id = PCI_VENDOR(id);
1263 hw->device_id = PCI_PRODUCT(id);
1264 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1265 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1266 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1267
1268 /* We need this to determine device-specific things */
1269 ixgbe_set_mac_type(hw);
1270
1271 /* Set the right number of segments */
1272 adapter->num_segs = IXGBE_82599_SCATTER;
1273
1274 return;
1275 }
1276
1277 /*********************************************************************
1278 *
1279 * Setup MSIX Interrupt resources and handlers
1280 *
1281 **********************************************************************/
/*
 * Allocate IXG_MSIX_NINTR MSI-X vectors and wire them up: vectors
 * [0 .. num_queues-1] service the TX/RX queue pairs and the next
 * vector services the PF mailbox (link) events.  Also establishes
 * the soft interrupts that continue queue/mailbox work outside the
 * hard interrupt.  Returns 0 on success or ENXIO on failure.
 */
static int
ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, rid, vector = 0;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	kcpuset_t *affinity;
	int cpu_id = 0;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* The VF strictly requires all of its vectors; no fallback */
	if (pci_msix_alloc_exact(pa,
	    &adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
		return (ENXIO);

	kcpuset_create(&affinity, false);
	/* One vector per queue pair */
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXV_MPSAFE
		/*
		 * NOTE(review): guarded by IXV_MPSAFE here but by
		 * IXG_MPSAFE for the mailbox vector below -- one of the
		 * two spellings is presumably wrong; confirm which
		 * build option is intended.
		 */
		pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		adapter->osdep.ihs[i] = pci_intr_establish(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
		if (adapter->osdep.ihs[i] == NULL) {
			que->res = NULL;
			aprint_error_dev(dev,
			    "Failed to register QUE handler");
			/*
			 * NOTE(review): previously established vectors and
			 * the MSI-X allocation itself are not released on
			 * this error path -- confirm the caller cleans up
			 * (e.g. via ixv_free_pci_resources()).
			 */
			kcpuset_destroy(affinity);
			return (ENXIO);
		}
		que->msix = vector;
		adapter->active_queues |= (u64)(1 << que->msix);

		cpu_id = i;
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0)
			aprint_normal(", bound queue %d to cpu %d\n",
			    i, cpu_id);
		else
			aprint_normal("\n");

#ifndef IXGBE_LEGACY_TX
		/* Soft interrupt for deferred multiqueue transmit starts */
		txr->txq_si = softint_establish(SOFTINT_NET,
		    ixgbe_deferred_mq_start, txr);
#endif
		/* Soft interrupt that continues queue work off hard IRQ */
		que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
		    que);
		if (que->que_si == NULL) {
			aprint_error_dev(dev,
			    "could not establish software interrupt\n");
		}
	}

	/* and Mailbox */
	cpu_id++;
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXG_MPSAFE
	/*
	 * NOTE(review): unlike the queue path above this passes
	 * &intrs[vector] (a pointer) rather than the handle itself;
	 * verify against the pci_intr_setattr(9) signature.
	 */
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
#endif
	/* Set the mbx handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
	if (adapter->osdep.ihs[vector] == NULL) {
		adapter->res = NULL;
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		kcpuset_destroy(affinity);
		return (ENXIO);
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s, ", intrstr);
	if (error == 0) {
		aprint_normal("affinity to cpu %d\n", cpu_id);
	}
	/* Remember which vector carries mailbox/link interrupts */
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
	    adapter);
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	/* NOTE(review): 'affinity' is never kcpuset_destroy()ed on the
	 * success path -- looks like a small leak; confirm. */
	return (0);
}
1398
1399 /*
1400 * Setup MSIX resources, note that the VF
1401 * device MUST use MSIX, there is no fallback.
1402 */
1403 static int
1404 ixv_setup_msix(struct adapter *adapter)
1405 {
1406 device_t dev = adapter->dev;
1407 int want, msgs;
1408
1409 /*
1410 ** Want two vectors: one for a queue,
1411 ** plus an additional for mailbox.
1412 */
1413 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1414 if (msgs < IXG_MSIX_NINTR) {
1415 aprint_error_dev(dev,"MSIX config error\n");
1416 return (ENXIO);
1417 }
1418 want = MIN(msgs, IXG_MSIX_NINTR);
1419
1420 adapter->msix_mem = (void *)1; /* XXX */
1421 aprint_normal_dev(dev,
1422 "Using MSIX interrupts with %d vectors\n", msgs);
1423 return (want);
1424 }
1425
1426
/*
 * Map BAR0 (the device register window) and decide how many queues
 * and MSI-X vectors to use.  Returns 0 on success or ENXIO.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));

	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Device registers must not be mapped prefetchable */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 signals "nothing to unmap" on detach */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* This VF driver drives a single queue pair */
	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	/*
	 * NOTE(review): ixv_setup_msix() returns either a small vector
	 * count or ENXIO; comparing the count against ENXIO only works
	 * while those value ranges cannot collide.
	 */
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1477
1478 static void
1479 ixv_free_pci_resources(struct adapter * adapter)
1480 {
1481 struct ix_queue *que = adapter->queues;
1482 int rid;
1483
1484 /*
1485 ** Release all msix queue resources:
1486 */
1487 for (int i = 0; i < adapter->num_queues; i++, que++) {
1488 rid = que->msix + 1;
1489 if (que->res != NULL)
1490 pci_intr_disestablish(adapter->osdep.pc,
1491 adapter->osdep.ihs[i]);
1492 }
1493
1494
1495 /* Clean the Legacy or Link interrupt last */
1496 if (adapter->vector) /* we are doing MSIX */
1497 rid = adapter->vector + 1;
1498 else
1499 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1500
1501 if (adapter->osdep.ihs[rid] != NULL)
1502 pci_intr_disestablish(adapter->osdep.pc,
1503 adapter->osdep.ihs[rid]);
1504 adapter->osdep.ihs[rid] = NULL;
1505
1506 #if defined(NETBSD_MSI_OR_MSIX)
1507 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1508 adapter->osdep.nintrs);
1509 #endif
1510
1511 if (adapter->osdep.mem_size != 0) {
1512 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1513 adapter->osdep.mem_bus_space_handle,
1514 adapter->osdep.mem_size);
1515 }
1516
1517 return;
1518 }
1519
1520 /*********************************************************************
1521 *
1522 * Setup networking device structure and register an interface.
1523 *
1524 **********************************************************************/
/*
 * Initialize the ifnet/ethercom structures, advertise interface
 * capabilities, and attach the interface to the network stack.
 */
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = 1000000000;	/* nominal 1 Gb/s */
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	/* Size the software send queue from the TX descriptor count */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	if_attach(ifp);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/* Callback used when interface flags change via ioctl */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise checksum offload and TSOv4, but leave them off */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_MTU;
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	** Dont turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the em driver you can
	** enable this and get full hardware tag filtering.
	*/
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1598
1599 static void
1600 ixv_config_link(struct adapter *adapter)
1601 {
1602 struct ixgbe_hw *hw = &adapter->hw;
1603 u32 autoneg, err = 0;
1604
1605 if (hw->mac.ops.check_link)
1606 err = hw->mac.ops.check_link(hw, &autoneg,
1607 &adapter->link_up, FALSE);
1608 if (err)
1609 goto out;
1610
1611 if (hw->mac.ops.setup_link)
1612 err = hw->mac.ops.setup_link(hw,
1613 autoneg, adapter->link_up);
1614 out:
1615 return;
1616 }
1617
1618
1619 /*********************************************************************
1620 *
1621 * Enable transmit unit.
1622 *
1623 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	/* Program each TX queue's registers, enabling it last */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;	/* ring DMA base */
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);	/* WTHRESH field starts at bit 16 */
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set the processing limit */
		txr->process_limit = ixv_tx_process_limit;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering on descriptor writeback */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1669
1670
1671 /*********************************************************************
1672 *
1673 * Setup receive registers and features.
1674 *
1675 **********************************************************************/
1676 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1677
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	int i;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, fctrl, rxcsum, hlreg;


	/* Enable broadcasts */
	/*
	 * NOTE(review): FCTRL, HLREG0 and RXCSUM are PF-scope
	 * registers; whether a VF write reaches them depends on the
	 * host -- confirm against the 82599 VF datasheet.
	 */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		/* 4K RX buffers for jumbo, 2K otherwise (SRRCTL units) */
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Program each RX queue, enabling it last */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;	/* ring DMA base */
		u32 reg, rxdctl;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		/* One buffer per packet, advanced descriptors */
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
		/* Set the processing limit */
		rxr->process_limit = ixv_rx_process_limit;

		/* Set Rx Tail register */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10 ms) for the enable bit to latch */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();	/* ensure ordering before the ring is used */
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* Without packet-checksum (PCSD), enable IP checksum offload */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1762
1763 static void
1764 ixv_setup_vlan_support(struct adapter *adapter)
1765 {
1766 struct ixgbe_hw *hw = &adapter->hw;
1767 u32 ctrl, vid, vfta, retry;
1768
1769
1770 /*
1771 ** We get here thru init_locked, meaning
1772 ** a soft reset, this has already cleared
1773 ** the VFTA and other state, so if there
1774 ** have been no vlan's registered do nothing.
1775 */
1776 if (!VLAN_ATTACHED(&adapter->osdep.ec))
1777 return;
1778
1779 /* Enable the queues */
1780 for (int i = 0; i < adapter->num_queues; i++) {
1781 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1782 ctrl |= IXGBE_RXDCTL_VME;
1783 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1784 }
1785
1786 /*
1787 ** A soft reset zero's out the VFTA, so
1788 ** we need to repopulate it now.
1789 */
1790 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1791 if (ixv_shadow_vfta[i] == 0)
1792 continue;
1793 vfta = ixv_shadow_vfta[i];
1794 /*
1795 ** Reconstruct the vlan id's
1796 ** based on the bits set in each
1797 ** of the array ints.
1798 */
1799 for ( int j = 0; j < 32; j++) {
1800 retry = 0;
1801 if ((vfta & (1 << j)) == 0)
1802 continue;
1803 vid = (i * 32) + j;
1804 /* Call the shared code mailbox routine */
1805 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1806 if (++retry > 5)
1807 break;
1808 }
1809 }
1810 }
1811 }
1812
#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
** This routine is run via an vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
/*
 * (Disabled: NetBSD has no vlan config event hook yet.)
 * Records the tag in the shadow VFTA and reinitializes so
 * ixv_setup_vlan_support() pushes it to hardware.
 */
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* One bit per VLAN id: 32 ids per 32-bit shadow word */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/*
** This routine is run via an vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	/* Clear this tag's bit in the shadow VFTA */
	ixv_shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
#endif
1868
/*
 * Unmask VF interrupts: set the interrupt mask, enable auto-clear
 * for the queue causes (but not mailbox/link), then enable each
 * queue's vector individually.
 */
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Auto-clear everything except the "other"/link-change causes */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	/* Force the writes out before interrupts can fire */
	IXGBE_WRITE_FLUSH(hw);

	return;
}
1890
1891 static void
1892 ixv_disable_intr(struct adapter *adapter)
1893 {
1894 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1895 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1896 IXGBE_WRITE_FLUSH(&adapter->hw);
1897 return;
1898 }
1899
1900 /*
1901 ** Setup the correct IVAR register for a particular MSIX interrupt
1902 ** - entry is the register array entry
1903 ** - vector is the MSIX vector for this queue
1904 ** - type is RX/TX/MISC
1905 */
1906 static void
1907 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1908 {
1909 struct ixgbe_hw *hw = &adapter->hw;
1910 u32 ivar, index;
1911
1912 vector |= IXGBE_IVAR_ALLOC_VAL;
1913
1914 if (type == -1) { /* MISC IVAR */
1915 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1916 ivar &= ~0xFF;
1917 ivar |= vector;
1918 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1919 } else { /* RX/TX IVARS */
1920 index = (16 * (entry & 1)) + (8 * type);
1921 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1922 ivar &= ~(0xFF << index);
1923 ivar |= (vector << index);
1924 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1925 }
1926 }
1927
1928 static void
1929 ixv_configure_ivars(struct adapter *adapter)
1930 {
1931 struct ix_queue *que = adapter->queues;
1932
1933 for (int i = 0; i < adapter->num_queues; i++, que++) {
1934 /* First the RX queue entry */
1935 ixv_set_ivar(adapter, i, que->msix, 0);
1936 /* ... and the TX */
1937 ixv_set_ivar(adapter, i, que->msix, 1);
1938 /* Set an initial value in EITR */
1939 IXGBE_WRITE_REG(&adapter->hw,
1940 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1941 }
1942
1943 /* For the mailbox interrupt */
1944 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1945 }
1946
1947
1948 /*
1949 ** Tasklet handler for MSIX MBX interrupts
1950 ** - do outside interrupt since it might sleep
1951 */
1952 static void
1953 ixv_handle_mbx(void *context)
1954 {
1955 struct adapter *adapter = context;
1956
1957 ixgbe_check_link(&adapter->hw,
1958 &adapter->link_speed, &adapter->link_up, 0);
1959 ixv_update_link_status(adapter);
1960 }
1961
1962 /*
1963 ** The VF stats registers never have a truely virgin
1964 ** starting point, so this routine tries to make an
1965 ** artificial one, marking ground zero on attach as
1966 ** it were.
1967 */
1968 static void
1969 ixv_save_stats(struct adapter *adapter)
1970 {
1971 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1972
1973 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1974 stats->saved_reset_vfgprc +=
1975 stats->vfgprc.ev_count - stats->base_vfgprc;
1976 stats->saved_reset_vfgptc +=
1977 stats->vfgptc.ev_count - stats->base_vfgptc;
1978 stats->saved_reset_vfgorc +=
1979 stats->vfgorc.ev_count - stats->base_vfgorc;
1980 stats->saved_reset_vfgotc +=
1981 stats->vfgotc.ev_count - stats->base_vfgotc;
1982 stats->saved_reset_vfmprc +=
1983 stats->vfmprc.ev_count - stats->base_vfmprc;
1984 }
1985 }
1986
1987 static void
1988 ixv_init_stats(struct adapter *adapter)
1989 {
1990 struct ixgbe_hw *hw = &adapter->hw;
1991
1992 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1993 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1994 adapter->stats.vf.last_vfgorc |=
1995 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1996
1997 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1998 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1999 adapter->stats.vf.last_vfgotc |=
2000 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2001
2002 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2003
2004 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2005 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2006 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2007 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2008 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2009 }
2010
/*
 * UPDATE_STAT_32(reg, last, count):
 *   Fold a 32-bit hardware counter into a 64-bit evcnt.  The
 *   register wraps at 2^32; when the current reading is below the
 *   previous one, exactly one wrap is assumed and carried into the
 *   upper 32 bits of the accumulator.  Expects a local 'hw' in
 *   scope.  NOTE(review): evaluates 'count' multiple times -- only
 *   safe with simple lvalue arguments, as used in ixv_update_stats().
 */
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count.ev_count += 0x100000000LL;	\
	last = current;					\
	count.ev_count &= 0xFFFFFFFF00000000LL;		\
	count.ev_count |= current;			\
}
2020
/*
 * UPDATE_STAT_36(lsb, msb, last, count):
 *   Same idea as UPDATE_STAT_32 for the 36-bit octet counters that
 *   span an LSB/MSB register pair; wrap detection carries 2^36.
 *   Expects a local 'hw' in scope; evaluates 'count' multiple times.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count.ev_count += 0x1000000000LL;	\
	last = current;					\
	count.ev_count &= 0xFFFFFFF000000000LL;		\
	count.ev_count |= current;			\
}
2032
2033 /*
2034 ** ixv_update_stats - Update the board statistics counters.
2035 */
2036 void
2037 ixv_update_stats(struct adapter *adapter)
2038 {
2039 struct ixgbe_hw *hw = &adapter->hw;
2040
2041 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2042 adapter->stats.vf.vfgprc);
2043 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2044 adapter->stats.vf.vfgptc);
2045 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2046 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2047 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2048 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2049 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2050 adapter->stats.vf.vfmprc);
2051 }
2052
2053 /*
2054 * Add statistic sysctls for the VF.
2055 */
/*
 * Attach evcnt(9) counters for driver and VF hardware statistics.
 * (Despite the name there are no sysctl nodes here; the stats are
 * exposed through the event-counter framework.)  Only the first
 * queue's rings are instrumented, matching num_queues == 1.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver dropped packets");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");

	/* VF hardware counters (mirrored by ixv_update_stats()) */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");
	/* Per-queue software counters */
	evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
	    xname, "IRQs on queue");
	evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
	    xname, "RX irqs on queue");
	evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
	    xname, "RX packets");
	evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
	    xname, "RX bytes");
	evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
	    xname, "Discarded RX packets");
	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
	    xname, "TX Packets");
	evcnt_attach_dynamic(&txr->tx_bytes, EVCNT_TYPE_MISC, NULL,
	    xname, "TX Bytes");
	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
	    xname, "# of times not enough descriptors were available during TX");
	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "TX TSO");
}
2105
2106 /**********************************************************************
2107 *
2108 * This routine is called only when em_display_debug_stats is enabled.
2109 * This routine provides a way to take a look at important statistics
2110 * maintained by the driver and hardware.
2111 *
2112 **********************************************************************/
2113 static void
2114 ixv_print_debug_info(struct adapter *adapter)
2115 {
2116 device_t dev = adapter->dev;
2117 struct ixgbe_hw *hw = &adapter->hw;
2118 struct ix_queue *que = adapter->queues;
2119 struct rx_ring *rxr;
2120 struct tx_ring *txr;
2121 #ifdef LRO
2122 struct lro_ctrl *lro;
2123 #endif /* LRO */
2124
2125 device_printf(dev,"Error Byte Count = %u \n",
2126 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2127
2128 for (int i = 0; i < adapter->num_queues; i++, que++) {
2129 txr = que->txr;
2130 rxr = que->rxr;
2131 #ifdef LRO
2132 lro = &rxr->lro;
2133 #endif /* LRO */
2134 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2135 que->msix, (long)que->irqs.ev_count);
2136 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2137 rxr->me, (long long)rxr->rx_packets.ev_count);
2138 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2139 rxr->me, (long)rxr->rx_bytes.ev_count);
2140 #ifdef LRO
2141 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2142 rxr->me, lro->lro_queued);
2143 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2144 rxr->me, lro->lro_flushed);
2145 #endif /* LRO */
2146 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2147 txr->me, (long)txr->total_packets.ev_count);
2148 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2149 txr->me, (long)txr->no_desc_avail.ev_count);
2150 }
2151
2152 device_printf(dev,"MBX IRQ Handled: %lu\n",
2153 (long)adapter->vector_irq.ev_count);
2154 return;
2155 }
2156
2157 static int
2158 ixv_sysctl_debug(SYSCTLFN_ARGS)
2159 {
2160 struct sysctlnode node;
2161 int error, result;
2162 struct adapter *adapter;
2163
2164 node = *rnode;
2165 adapter = (struct adapter *)node.sysctl_data;
2166 node.sysctl_data = &result;
2167 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2168
2169 if (error)
2170 return error;
2171
2172 if (result == 1)
2173 ixv_print_debug_info(adapter);
2174
2175 return 0;
2176 }
2177
2178 const struct sysctlnode *
2179 ixv_sysctl_instance(struct adapter *adapter)
2180 {
2181 const char *dvname;
2182 struct sysctllog **log;
2183 int rc;
2184 const struct sysctlnode *rnode;
2185
2186 log = &adapter->sysctllog;
2187 dvname = device_xname(adapter->dev);
2188
2189 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2190 0, CTLTYPE_NODE, dvname,
2191 SYSCTL_DESCR("ixv information and settings"),
2192 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2193 goto err;
2194
2195 return rnode;
2196 err:
2197 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2198 return NULL;
2199 }
2200