1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
34 /*$NetBSD: ixv.c,v 1.45 2017/02/08 08:13:53 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
42 /*********************************************************************
43 * Driver version
44 *********************************************************************/
45 char ixv_driver_version[] = "1.4.6-k";
46
47 /*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select the devices to attach to
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
58 {
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 /* required last entry */
64 {0, 0, 0, 0, 0}
65 };
66
67 /*********************************************************************
68 * Table of branding strings
69 *********************************************************************/
70
71 static const char *ixv_strings[] = {
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 };
74
75 /*********************************************************************
76 * Function prototypes
77 *********************************************************************/
78 static int ixv_probe(device_t, cfdata_t, void *);
79 static void ixv_attach(device_t, device_t, void *);
80 static int ixv_detach(device_t, int);
81 #if 0
82 static int ixv_shutdown(device_t);
83 #endif
84 static int ixv_ioctl(struct ifnet *, u_long, void *);
85 static int ixv_init(struct ifnet *);
86 static void ixv_init_locked(struct adapter *);
87 static void ixv_stop(void *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
90 static void ixv_identify_hardware(struct adapter *);
91 static int ixv_allocate_pci_resources(struct adapter *,
92 const struct pci_attach_args *);
93 static int ixv_allocate_msix(struct adapter *,
94 const struct pci_attach_args *);
95 static int ixv_setup_msix(struct adapter *);
96 static void ixv_free_pci_resources(struct adapter *);
97 static void ixv_local_timer(void *);
98 static void ixv_local_timer_locked(void *);
99 static void ixv_setup_interface(device_t, struct adapter *);
100 static void ixv_config_link(struct adapter *);
101
102 static void ixv_initialize_transmit_units(struct adapter *);
103 static void ixv_initialize_receive_units(struct adapter *);
104
105 static void ixv_enable_intr(struct adapter *);
106 static void ixv_disable_intr(struct adapter *);
107 static void ixv_set_multi(struct adapter *);
108 static void ixv_update_link_status(struct adapter *);
109 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
110 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
111 static void ixv_configure_ivars(struct adapter *);
112 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
113
114 static void ixv_setup_vlan_support(struct adapter *);
115 #if 0
116 static void ixv_register_vlan(void *, struct ifnet *, u16);
117 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
118 #endif
119
120 static void ixv_save_stats(struct adapter *);
121 static void ixv_init_stats(struct adapter *);
122 static void ixv_update_stats(struct adapter *);
123 static void ixv_add_stats_sysctls(struct adapter *);
124 static void ixv_set_sysctl_value(struct adapter *, const char *,
125 const char *, int *, int);
126
127 /* The MSI/X Interrupt handlers */
128 static int ixv_msix_que(void *);
129 static int ixv_msix_mbx(void *);
130
131 /* Deferred interrupt tasklets */
132 static void ixv_handle_que(void *);
133 static void ixv_handle_mbx(void *);
134
135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
137
138 #ifdef DEV_NETMAP
139 /*
140 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
141 * if_ix.c.
142 */
143 extern void ixgbe_netmap_attach(struct adapter *adapter);
144
145 #include <net/netmap.h>
146 #include <sys/selinfo.h>
147 #include <dev/netmap/netmap_kern.h>
148 #endif /* DEV_NETMAP */
149
150 /*********************************************************************
151 * FreeBSD Device Interface Entry Points
152 *********************************************************************/
153
154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
155 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
156 DVF_DETACH_SHUTDOWN);
157
158 #if 0
159 static device_method_t ixv_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, ixv_probe),
162 DEVMETHOD(device_attach, ixv_attach),
163 DEVMETHOD(device_detach, ixv_detach),
164 DEVMETHOD(device_shutdown, ixv_shutdown),
165 DEVMETHOD_END
166 };
167 #endif
168
169 #if 0
170 static driver_t ixv_driver = {
171 "ixv", ixv_methods, sizeof(struct adapter),
172 };
173
174 devclass_t ixv_devclass;
175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
178 #ifdef DEV_NETMAP
179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
180 #endif /* DEV_NETMAP */
181 /* XXX depend on 'ix' ? */
182 #endif
183
184 /*
185 ** TUNEABLE PARAMETERS:
186 */
187
188 /* Number of Queues - do not exceed MSIX vectors - 1 */
189 static int ixv_num_queues = 0;
190 #define TUNABLE_INT(__x, __y)
191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
192
193 /*
194 ** AIM: Adaptive Interrupt Moderation
195 ** which means that the interrupt rate
196 ** is varied over time based on the
197 ** traffic for that interrupt vector
198 */
199 static int ixv_enable_aim = FALSE;
200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
201
202 /* How many packets rxeof tries to clean at a time */
203 static int ixv_rx_process_limit = 256;
204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
205
206 /* How many packets txeof tries to clean at a time */
207 static int ixv_tx_process_limit = 256;
208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
209
210 /*
211 ** Number of TX descriptors per ring,
212 ** set higher than RX as this seems to be
213 ** the better performing choice.
214 */
215 static int ixv_txd = DEFAULT_TXD;
216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
217
218 /* Number of RX descriptors per ring */
219 static int ixv_rxd = DEFAULT_RXD;
220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
221
222 /*
223 ** Shadow VFTA table.  This is needed because
224 ** the real filter table gets cleared during
225 ** a soft reset and we need to repopulate it.
226 */
227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
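
/*
 * Illustrative sketch only (hypothetical helper, kept disabled): how a
 * VLAN id maps into the shadow VFTA above.  Each 32-bit entry covers
 * 32 VLAN ids, so bits 11..5 of the id select the entry and bits 4..0
 * select the bit -- the same math used by the (disabled)
 * ixv_register_vlan() below and inverted in ixv_setup_vlan_support().
 */
#if 0
static inline void
ixv_shadow_vfta_set(u16 vtag)
{
	u16 index = (vtag >> 5) & 0x7F;	/* which 32-bit entry */
	u16 bit = vtag & 0x1F;		/* which bit within it */

	ixv_shadow_vfta[index] |= (1 << bit);
}
#endif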
228
229 /*********************************************************************
230 * Device identification routine
231 *
232 * ixv_probe determines if the driver should be loaded on an
233 * adapter based on the PCI vendor/device ID of the adapter.
234 *
235 * return 1 on success, 0 on failure
236 *********************************************************************/
237
238 static int
239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
240 {
241 #ifdef __HAVE_PCI_MSI_MSIX
242 const struct pci_attach_args *pa = aux;
243
244 return (ixv_lookup(pa) != NULL) ? 1 : 0;
245 #else
246 return 0;
247 #endif
248 }
249
250 static ixgbe_vendor_info_t *
251 ixv_lookup(const struct pci_attach_args *pa)
252 {
253 pcireg_t subid;
254 ixgbe_vendor_info_t *ent;
255
256 INIT_DEBUGOUT("ixv_lookup: begin");
257
258 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
259 return NULL;
260
261 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
262
263 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
264 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
265 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
266
267 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
269
270 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
272 return ent;
273 }
274 }
275 return NULL;
276 }
277
278
279 static void
280 ixv_sysctl_attach(struct adapter *adapter)
281 {
282 struct sysctllog **log;
283 const struct sysctlnode *rnode, *cnode;
284 device_t dev;
285
286 dev = adapter->dev;
287 log = &adapter->sysctllog;
288
289 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
290 aprint_error_dev(dev, "could not create sysctl root\n");
291 return;
292 }
293
294 if (sysctl_createv(log, 0, &rnode, &cnode,
295 CTLFLAG_READWRITE, CTLTYPE_INT,
296 "debug", SYSCTL_DESCR("Debug Info"),
297 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
298 aprint_error_dev(dev, "could not create sysctl\n");
299
300 /* XXX This is an *instance* sysctl controlling a *global* variable.
301 * XXX It's that way in the FreeBSD driver that this derives from.
302 */
303 if (sysctl_createv(log, 0, &rnode, &cnode,
304 CTLFLAG_READWRITE, CTLTYPE_INT,
305 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
306 NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
307 aprint_error_dev(dev, "could not create sysctl\n");
308 }
309
310 /*********************************************************************
311 * Device initialization routine
312 *
313 * The attach entry point is called when the driver is being loaded.
314 * This routine identifies the type of hardware, allocates all resources
315 * and initializes the hardware.
316 *
317 * return 0 on success, positive on failure
318 *********************************************************************/
319
320 static void
321 ixv_attach(device_t parent, device_t dev, void *aux)
322 {
323 struct adapter *adapter;
324 struct ixgbe_hw *hw;
325 int error = 0;
326 ixgbe_vendor_info_t *ent;
327 const struct pci_attach_args *pa = aux;
328
329 INIT_DEBUGOUT("ixv_attach: begin");
330
331 /* Allocate, clear, and link in our adapter structure */
332 adapter = device_private(dev);
333 adapter->dev = dev;
334 hw = &adapter->hw;
335
336 #ifdef DEV_NETMAP
337 adapter->init_locked = ixv_init_locked;
338 adapter->stop_locked = ixv_stop;
339 #endif
340
341 adapter->osdep.pc = pa->pa_pc;
342 adapter->osdep.tag = pa->pa_tag;
343 if (pci_dma64_available(pa))
344 adapter->osdep.dmat = pa->pa_dmat64;
345 else
346 adapter->osdep.dmat = pa->pa_dmat;
347 adapter->osdep.attached = false;
348
349 ent = ixv_lookup(pa);
350
351 KASSERT(ent != NULL);
352
353 aprint_normal(": %s, Version - %s\n",
354 ixv_strings[ent->index], ixv_driver_version);
355
356 /* Core Lock Init*/
357 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
358
359 /* SYSCTL APIs */
360 ixv_sysctl_attach(adapter);
361
362 /* Set up the timer callout */
363 callout_init(&adapter->timer, 0);
364
365 /* Determine hardware revision */
366 ixv_identify_hardware(adapter);
367
368 /* Do base PCI setup - map BAR0 */
369 if (ixv_allocate_pci_resources(adapter, pa)) {
370 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
371 error = ENXIO;
372 goto err_out;
373 }
374
375 /* Sysctls for limiting the amount of work done in the taskqueues */
376 ixv_set_sysctl_value(adapter, "rx_processing_limit",
377 "max number of rx packets to process",
378 &adapter->rx_process_limit, ixv_rx_process_limit);
379
380 ixv_set_sysctl_value(adapter, "tx_processing_limit",
381 "max number of tx packets to process",
382 &adapter->tx_process_limit, ixv_tx_process_limit);
383
384 /* Do descriptor calc and sanity checks */
385 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
386 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
387 aprint_error_dev(dev, "TXD config issue, using default!\n");
388 adapter->num_tx_desc = DEFAULT_TXD;
389 } else
390 adapter->num_tx_desc = ixv_txd;
391
392 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
393 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
394 aprint_error_dev(dev, "RXD config issue, using default!\n");
395 adapter->num_rx_desc = DEFAULT_RXD;
396 } else
397 adapter->num_rx_desc = ixv_rxd;
398
399 /* Allocate our TX/RX Queues */
400 if (ixgbe_allocate_queues(adapter)) {
401 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
402 error = ENOMEM;
403 goto err_out;
404 }
405
406 /*
407 ** Initialize the shared code; it's
408 ** at this point that the mac type is set.
409 */
410 error = ixgbe_init_shared_code(hw);
411 if (error) {
412 aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
413 error = EIO;
414 goto err_late;
415 }
416
417 /* Setup the mailbox */
418 ixgbe_init_mbx_params_vf(hw);
419
420 /* Reset mbox api to 1.0 */
421 error = ixgbe_reset_hw(hw);
422 if (error == IXGBE_ERR_RESET_FAILED)
423 aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
424 else if (error)
425 aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
426 if (error) {
427 error = EIO;
428 goto err_late;
429 }
430
431 /* Negotiate mailbox API version */
432 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
433 if (error)
434 aprint_debug_dev(dev,
435 "MBX API 1.1 negotiation failed! Error %d\n", error);
436
437 error = ixgbe_init_hw(hw);
438 if (error) {
439 aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
440 error = EIO;
441 goto err_late;
442 }
443
444 error = ixv_allocate_msix(adapter, pa);
445 if (error) {
446 device_printf(dev, "ixv_allocate_msix() failed!\n");
447 goto err_late;
448 }
449
450 /* If no mac address was assigned, make a random one */
451 if (!ixv_check_ether_addr(hw->mac.addr)) {
452 u8 addr[ETHER_ADDR_LEN];
453 uint64_t rndval = cprng_fast64();
454
455 memcpy(addr, &rndval, sizeof(addr));
456 addr[0] &= 0xFE;
457 addr[0] |= 0x02;
458 bcopy(addr, hw->mac.addr, sizeof(addr));
459 }
460
461 /* Setup OS specific network interface */
462 ixv_setup_interface(dev, adapter);
463
464 /* Do the stats setup */
465 ixv_save_stats(adapter);
466 ixv_init_stats(adapter);
467 ixv_add_stats_sysctls(adapter);
468
469 /* Register for VLAN events */
470 #if 0 /* XXX delete after write? */
471 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
472 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
473 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
474 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
475 #endif
476
477 #ifdef DEV_NETMAP
478 ixgbe_netmap_attach(adapter);
479 #endif /* DEV_NETMAP */
480 INIT_DEBUGOUT("ixv_attach: end");
481 adapter->osdep.attached = true;
482 return;
483
484 err_late:
485 ixgbe_free_transmit_structures(adapter);
486 ixgbe_free_receive_structures(adapter);
487 err_out:
488 ixv_free_pci_resources(adapter);
489 return;
490
491 }
492
493 /*********************************************************************
494 * Device removal routine
495 *
496 * The detach entry point is called when the driver is being removed.
497 * This routine stops the adapter and deallocates all the resources
498 * that were allocated for driver operation.
499 *
500 * return 0 on success, positive on failure
501 *********************************************************************/
502
503 static int
504 ixv_detach(device_t dev, int flags)
505 {
506 struct adapter *adapter = device_private(dev);
507 struct ix_queue *que = adapter->queues;
508 struct tx_ring *txr = adapter->tx_rings;
509
510 INIT_DEBUGOUT("ixv_detach: begin");
511 if (adapter->osdep.attached == false)
512 return 0;
513
514 #if NVLAN > 0
515 /* Make sure VLANS are not using driver */
516 if (!VLAN_ATTACHED(&adapter->osdep.ec))
517 ; /* nothing to do: no VLANs */
518 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
519 vlan_ifdetach(adapter->ifp);
520 else {
521 aprint_error_dev(dev, "VLANs in use, detach first\n");
522 return EBUSY;
523 }
524 #endif
525
526 IXGBE_CORE_LOCK(adapter);
527 ixv_stop(adapter);
528 IXGBE_CORE_UNLOCK(adapter);
529
530 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
531 #ifndef IXGBE_LEGACY_TX
532 softint_disestablish(txr->txr_si);
533 #endif
534 softint_disestablish(que->que_si);
535 }
536
537 /* Drain the Mailbox(link) queue */
538 softint_disestablish(adapter->link_si);
539
540 /* Unregister VLAN events */
541 #if 0 /* XXX msaitoh delete after write? */
542 if (adapter->vlan_attach != NULL)
543 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
544 if (adapter->vlan_detach != NULL)
545 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
546 #endif
547
548 ether_ifdetach(adapter->ifp);
549 callout_halt(&adapter->timer, NULL);
550 #ifdef DEV_NETMAP
551 netmap_detach(adapter->ifp);
552 #endif /* DEV_NETMAP */
553 ixv_free_pci_resources(adapter);
554 #if 0 /* XXX the NetBSD port is probably missing something here */
555 bus_generic_detach(dev);
556 #endif
557 if_detach(adapter->ifp);
558
559 sysctl_teardown(&adapter->sysctllog);
560
561 ixgbe_free_transmit_structures(adapter);
562 ixgbe_free_receive_structures(adapter);
563
564 IXGBE_CORE_LOCK_DESTROY(adapter);
565 return (0);
566 }
567
568 /*********************************************************************
569 *
570 * Shutdown entry point
571 *
572 **********************************************************************/
573 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
574 static int
575 ixv_shutdown(device_t dev)
576 {
577 struct adapter *adapter = device_private(dev);
578 IXGBE_CORE_LOCK(adapter);
579 ixv_stop(adapter);
580 IXGBE_CORE_UNLOCK(adapter);
581 return (0);
582 }
583 #endif
584
585 static int
586 ixv_ifflags_cb(struct ethercom *ec)
587 {
588 struct ifnet *ifp = &ec->ec_if;
589 struct adapter *adapter = ifp->if_softc;
590 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
591
592 IXGBE_CORE_LOCK(adapter);
593
594 if (change != 0)
595 adapter->if_flags = ifp->if_flags;
596
597 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
598 rc = ENETRESET;
599
600 IXGBE_CORE_UNLOCK(adapter);
601
602 return rc;
603 }
604
605 /*********************************************************************
606 * Ioctl entry point
607 *
608 * ixv_ioctl is called when the user wants to configure the
609 * interface.
610 *
611 * return 0 on success, positive on failure
612 **********************************************************************/
613
614 static int
615 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
616 {
617 struct adapter *adapter = ifp->if_softc;
618 struct ifcapreq *ifcr = data;
619 struct ifreq *ifr = (struct ifreq *) data;
620 int error = 0;
621 int l4csum_en;
622 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
623 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
624
625 switch (command) {
626 case SIOCSIFFLAGS:
627 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
628 break;
629 case SIOCADDMULTI:
630 case SIOCDELMULTI:
631 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
632 break;
633 case SIOCSIFMEDIA:
634 case SIOCGIFMEDIA:
635 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
636 break;
637 case SIOCSIFCAP:
638 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
639 break;
640 case SIOCSIFMTU:
641 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
642 break;
643 default:
644 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
645 break;
646 }
647
648 switch (command) {
649 case SIOCSIFMEDIA:
650 case SIOCGIFMEDIA:
651 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
652 case SIOCSIFCAP:
653 /* Layer-4 Rx checksum offload has to be turned on and
654 * off as a unit.
655 */
656 l4csum_en = ifcr->ifcr_capenable & l4csum;
657 if (l4csum_en != l4csum && l4csum_en != 0)
658 return EINVAL;
659 /*FALLTHROUGH*/
660 case SIOCADDMULTI:
661 case SIOCDELMULTI:
662 case SIOCSIFFLAGS:
663 case SIOCSIFMTU:
664 default:
665 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
666 return error;
667 if ((ifp->if_flags & IFF_RUNNING) == 0)
668 ;
669 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
670 IXGBE_CORE_LOCK(adapter);
671 ixv_init_locked(adapter);
672 IXGBE_CORE_UNLOCK(adapter);
673 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
674 /*
675 * Multicast list has changed; set the hardware filter
676 * accordingly.
677 */
678 IXGBE_CORE_LOCK(adapter);
679 ixv_disable_intr(adapter);
680 ixv_set_multi(adapter);
681 ixv_enable_intr(adapter);
682 IXGBE_CORE_UNLOCK(adapter);
683 }
684 return 0;
685 }
686 }
687
688 /*********************************************************************
689 * Init entry point
690 *
691 * This routine is used in two ways. It is used by the stack as
692 * the init entry point in the network interface structure. It is also used
693 * by the driver as a hw/sw initialization routine to get to a
694 * consistent state.
695 *
696 * return 0 on success, positive on failure
697 **********************************************************************/
698 #define IXGBE_MHADD_MFS_SHIFT 16
699
700 static void
701 ixv_init_locked(struct adapter *adapter)
702 {
703 struct ifnet *ifp = adapter->ifp;
704 device_t dev = adapter->dev;
705 struct ixgbe_hw *hw = &adapter->hw;
706 int error = 0;
707
708 INIT_DEBUGOUT("ixv_init_locked: begin");
709 KASSERT(mutex_owned(&adapter->core_mtx));
710 hw->adapter_stopped = FALSE;
711 ixgbe_stop_adapter(hw);
712 callout_stop(&adapter->timer);
713
714 /* reprogram the RAR[0] in case user changed it. */
715 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
716
717 /* Get the latest mac address, User can use a LAA */
718 memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
719 IXGBE_ETH_LENGTH_OF_ADDRESS);
720 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
721 hw->addr_ctrl.rar_used_count = 1;
722
723 /* Prepare transmit descriptors and buffers */
724 if (ixgbe_setup_transmit_structures(adapter)) {
725 aprint_error_dev(dev, "Could not setup transmit structures\n");
726 ixv_stop(adapter);
727 return;
728 }
729
730 /* Reset VF and renegotiate mailbox API version */
731 ixgbe_reset_hw(hw);
732 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
733 if (error)
734 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
735
736 ixv_initialize_transmit_units(adapter);
737
738 /* Setup Multicast table */
739 ixv_set_multi(adapter);
740
741 /*
742 ** Determine the correct mbuf pool
743 ** for doing jumbo/headersplit
744 */
745 if (ifp->if_mtu > ETHERMTU)
746 adapter->rx_mbuf_sz = MJUMPAGESIZE;
747 else
748 adapter->rx_mbuf_sz = MCLBYTES;
749
750 /* Prepare receive descriptors and buffers */
751 if (ixgbe_setup_receive_structures(adapter)) {
752 device_printf(dev, "Could not setup receive structures\n");
753 ixv_stop(adapter);
754 return;
755 }
756
757 /* Configure RX settings */
758 ixv_initialize_receive_units(adapter);
759
760 #if 0 /* XXX isn't it required? -- msaitoh */
761 /* Set the various hardware offload abilities */
762 ifp->if_hwassist = 0;
763 if (ifp->if_capenable & IFCAP_TSO4)
764 ifp->if_hwassist |= CSUM_TSO;
765 if (ifp->if_capenable & IFCAP_TXCSUM) {
766 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
767 #if __FreeBSD_version >= 800000
768 ifp->if_hwassist |= CSUM_SCTP;
769 #endif
770 }
771 #endif
772
773 /* Set up VLAN offload and filter */
774 ixv_setup_vlan_support(adapter);
775
776 /* Set up MSI/X routing */
777 ixv_configure_ivars(adapter);
778
779 /* Set up auto-mask */
780 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
781
782 /* Set moderation on the Link interrupt */
783 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
784
785 /* Stats init */
786 ixv_init_stats(adapter);
787
788 /* Config/Enable Link */
789 ixv_config_link(adapter);
790 hw->mac.get_link_status = TRUE;
791
792 /* Start watchdog */
793 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
794
795 /* And now turn on interrupts */
796 ixv_enable_intr(adapter);
797
798 /* Now inform the stack we're ready */
799 ifp->if_flags |= IFF_RUNNING;
800 ifp->if_flags &= ~IFF_OACTIVE;
801
802 return;
803 }
804
805 static int
806 ixv_init(struct ifnet *ifp)
807 {
808 struct adapter *adapter = ifp->if_softc;
809
810 IXGBE_CORE_LOCK(adapter);
811 ixv_init_locked(adapter);
812 IXGBE_CORE_UNLOCK(adapter);
813 return 0;
814 }
815
816
817 /*
818 **
819 ** MSIX Interrupt Handlers and Tasklets
820 **
821 */
822
823 static inline void
824 ixv_enable_queue(struct adapter *adapter, u32 vector)
825 {
826 struct ixgbe_hw *hw = &adapter->hw;
827 u32 queue = 1 << vector;
828 u32 mask;
829
830 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
831 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
832 }
833
834 static inline void
835 ixv_disable_queue(struct adapter *adapter, u32 vector)
836 {
837 struct ixgbe_hw *hw = &adapter->hw;
838 u64 queue = (u64)1 << vector;
839 u32 mask;
840
841 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
842 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
843 }
844
845 static inline void
846 ixv_rearm_queues(struct adapter *adapter, u64 queues)
847 {
848 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
849 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
850 }
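
/*
 * A minimal usage sketch (disabled; assumes an adapter pointer in
 * scope): the helpers above turn an MSIX vector number into a single
 * bit of the VTEIMS/VTEIMC/VTEICS mask, restricted to the RX/TX queue
 * bits by IXGBE_EIMS_RTX_QUEUE.
 */
#if 0
	ixv_enable_queue(adapter, 0);	/* set bit 0 in VTEIMS */
	ixv_disable_queue(adapter, 1);	/* set bit 1 in VTEIMC */
	ixv_rearm_queues(adapter, 0x3);	/* kick both queues via VTEICS */
#endif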
851
852
853 static void
854 ixv_handle_que(void *context)
855 {
856 struct ix_queue *que = context;
857 struct adapter *adapter = que->adapter;
858 struct tx_ring *txr = que->txr;
859 struct ifnet *ifp = adapter->ifp;
860 bool more;
861
862 if (ifp->if_flags & IFF_RUNNING) {
863 more = ixgbe_rxeof(que);
864 IXGBE_TX_LOCK(txr);
865 ixgbe_txeof(txr);
866 #ifndef IXGBE_LEGACY_TX
867 if (pcq_peek(txr->txr_interq) != NULL)
868 ixgbe_mq_start_locked(ifp, txr);
869 #else
870 if (!IFQ_IS_EMPTY(&ifp->if_snd))
871 ixgbe_start_locked(txr, ifp);
872 #endif
873 IXGBE_TX_UNLOCK(txr);
874 if (more) {
875 adapter->req.ev_count++;
876 softint_schedule(que->que_si);
877 return;
878 }
879 }
880
881 /* Reenable this interrupt */
882 ixv_enable_queue(adapter, que->msix);
883 return;
884 }
885
886 /*********************************************************************
887 *
888 * MSIX Queue Interrupt Service routine
889 *
890 **********************************************************************/
891 int
892 ixv_msix_que(void *arg)
893 {
894 struct ix_queue *que = arg;
895 struct adapter *adapter = que->adapter;
896 struct ifnet *ifp = adapter->ifp;
897 struct tx_ring *txr = que->txr;
898 struct rx_ring *rxr = que->rxr;
899 bool more;
900 u32 newitr = 0;
901
902 ixv_disable_queue(adapter, que->msix);
903 ++que->irqs.ev_count;
904
905 #ifdef __NetBSD__
906 /* Don't run ixgbe_rxeof in interrupt context */
907 more = true;
908 #else
909 more = ixgbe_rxeof(que);
910 #endif
911
912 IXGBE_TX_LOCK(txr);
913 ixgbe_txeof(txr);
914 /*
915 ** Make certain that if the stack
916 ** has anything queued the task gets
917 ** scheduled to handle it.
918 */
919 #ifdef IXGBE_LEGACY_TX
920 if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
921 ixgbe_start_locked(txr, ifp);
922 #else
923 if (pcq_peek(txr->txr_interq) != NULL)
924 ixgbe_mq_start_locked(ifp, txr);
925 #endif
926 IXGBE_TX_UNLOCK(txr);
927
928 /* Do AIM now? */
929
930 if (ixv_enable_aim == FALSE)
931 goto no_calc;
932 /*
933 ** Do Adaptive Interrupt Moderation:
934 ** - Write out last calculated setting
935 ** - Calculate based on average size over
936 ** the last interval.
937 */
938 if (que->eitr_setting)
939 IXGBE_WRITE_REG(&adapter->hw,
940 IXGBE_VTEITR(que->msix),
941 que->eitr_setting);
942
943 que->eitr_setting = 0;
944
945 /* Idle, do nothing */
946 if ((txr->bytes == 0) && (rxr->bytes == 0))
947 goto no_calc;
948
949 if ((txr->bytes) && (txr->packets))
950 newitr = txr->bytes/txr->packets;
951 if ((rxr->bytes) && (rxr->packets))
952 newitr = max(newitr,
953 (rxr->bytes / rxr->packets));
954 newitr += 24; /* account for hardware frame, crc */
955
956 /* set an upper boundary */
957 newitr = min(newitr, 3000);
958
959 /* Be nice to the mid range */
960 if ((newitr > 300) && (newitr < 1200))
961 newitr = (newitr / 3);
962 else
963 newitr = (newitr / 2);
964
965 newitr |= newitr << 16;
966
967 /* save for next interrupt */
968 que->eitr_setting = newitr;
969
970 /* Reset state */
971 txr->bytes = 0;
972 txr->packets = 0;
973 rxr->bytes = 0;
974 rxr->packets = 0;
975
976 no_calc:
977 if (more)
978 softint_schedule(que->que_si);
979 else /* Reenable this interrupt */
980 ixv_enable_queue(adapter, que->msix);
981 return 1;
982 }
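
/*
 * Worked example of the AIM calculation above (illustrative numbers):
 * with an average frame of 1500 bytes, newitr = 1500 + 24 = 1524,
 * which falls outside the 300..1200 "mid range" and is halved to 762;
 * that value, duplicated into the upper half-word, is written to
 * VTEITR on the next interrupt.  A 64-byte average gives
 * (64 + 24) / 2 = 44, i.e. a much shorter moderation interval for
 * small-packet traffic.
 */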
983
984 static int
985 ixv_msix_mbx(void *arg)
986 {
987 struct adapter *adapter = arg;
988 struct ixgbe_hw *hw = &adapter->hw;
989 u32 reg;
990
991 ++adapter->link_irq.ev_count;
992
993 /* First get the cause */
994 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
995 /* Clear interrupt with write */
996 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
997
998 /* Link status change */
999 if (reg & IXGBE_EICR_LSC)
1000 softint_schedule(adapter->link_si);
1001
1002 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1003 return 1;
1004 }
1005
1006 /*********************************************************************
1007 *
1008 * Media Ioctl callback
1009 *
1010 * This routine is called whenever the user queries the status of
1011 * the interface using ifconfig.
1012 *
1013 **********************************************************************/
1014 static void
1015 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1016 {
1017 struct adapter *adapter = ifp->if_softc;
1018
1019 INIT_DEBUGOUT("ixv_media_status: begin");
1020 IXGBE_CORE_LOCK(adapter);
1021 ixv_update_link_status(adapter);
1022
1023 ifmr->ifm_status = IFM_AVALID;
1024 ifmr->ifm_active = IFM_ETHER;
1025
1026 if (!adapter->link_active) {
1027 ifmr->ifm_active |= IFM_NONE;
1028 IXGBE_CORE_UNLOCK(adapter);
1029 return;
1030 }
1031
1032 ifmr->ifm_status |= IFM_ACTIVE;
1033
1034 switch (adapter->link_speed) {
1035 case IXGBE_LINK_SPEED_10GB_FULL:
1036 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1037 break;
1038 case IXGBE_LINK_SPEED_1GB_FULL:
1039 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1040 break;
1041 case IXGBE_LINK_SPEED_100_FULL:
1042 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1043 break;
1044 }
1045
1046 IXGBE_CORE_UNLOCK(adapter);
1047
1048 return;
1049 }
1050
1051 /*********************************************************************
1052 *
1053 * Media Ioctl callback
1054 *
1055 * This routine is called when the user changes speed/duplex using
1056 * the media/mediaopt option with ifconfig.
1057 *
1058 **********************************************************************/
1059 static int
1060 ixv_media_change(struct ifnet * ifp)
1061 {
1062 struct adapter *adapter = ifp->if_softc;
1063 struct ifmedia *ifm = &adapter->media;
1064
1065 INIT_DEBUGOUT("ixv_media_change: begin");
1066
1067 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1068 return (EINVAL);
1069
1070 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1071 case IFM_AUTO:
1072 break;
1073 default:
1074 device_printf(adapter->dev, "Only auto media type\n");
1075 return (EINVAL);
1076 }
1077
1078 return (0);
1079 }
1080
1081
1082 /*********************************************************************
1083 * Multicast Update
1084 *
1085 * This routine is called whenever the multicast address list is updated.
1086 *
1087 **********************************************************************/
1088 #define IXGBE_RAR_ENTRIES 16
1089
1090 static void
1091 ixv_set_multi(struct adapter *adapter)
1092 {
1093 struct ether_multi *enm;
1094 struct ether_multistep step;
1095 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1096 u8 *update_ptr;
1097 int mcnt = 0;
1098 struct ethercom *ec = &adapter->osdep.ec;
1099
1100 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1101
1102 ETHER_FIRST_MULTI(step, ec, enm);
1103 while (enm != NULL) {
1104 bcopy(enm->enm_addrlo,
1105 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1106 IXGBE_ETH_LENGTH_OF_ADDRESS);
1107 mcnt++;
1108 /* XXX This might be required --msaitoh */
1109 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1110 break;
1111 ETHER_NEXT_MULTI(step, enm);
1112 }
1113
1114 update_ptr = mta;
1115
1116 ixgbe_update_mc_addr_list(&adapter->hw,
1117 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1118
1119 return;
1120 }
1121
1122 /*
1123 * This is an iterator function needed by the shared multicast
1124 * code. It simply feeds the shared code routine the
1125 * addresses in the array built by ixv_set_multi(), one by one.
1126 */
1127 static u8 *
1128 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1129 {
1130 u8 *addr = *update_ptr;
1131 u8 *newptr;
1132 *vmdq = 0;
1133
1134 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1135 *update_ptr = newptr;
1136 return addr;
1137 }
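
/*
 * A minimal sketch (disabled) of how the shared code consumes the
 * iterator above: ixgbe_update_mc_addr_list() calls it mcnt times,
 * each call handing back one 6-byte address and advancing update_ptr.
 */
#if 0
	u8 *ptr = mta;
	u32 vmdq;

	for (int i = 0; i < mcnt; i++) {
		u8 *addr = ixv_mc_array_itr(&adapter->hw, &ptr, &vmdq);
		/* addr now points at the i-th multicast address */
	}
#endif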
1138
1139 /*********************************************************************
1140 * Timer routine
1141 *
1142 * This routine checks for link status, updates statistics,
1143 * and runs the watchdog check.
1144 *
1145 **********************************************************************/
1146
1147 static void
1148 ixv_local_timer(void *arg)
1149 {
1150 struct adapter *adapter = arg;
1151
1152 IXGBE_CORE_LOCK(adapter);
1153 ixv_local_timer_locked(adapter);
1154 IXGBE_CORE_UNLOCK(adapter);
1155 }
1156
1157 static void
1158 ixv_local_timer_locked(void *arg)
1159 {
1160 struct adapter *adapter = arg;
1161 device_t dev = adapter->dev;
1162 struct ix_queue *que = adapter->queues;
1163 u64 queues = 0;
1164 int hung = 0;
1165
1166 KASSERT(mutex_owned(&adapter->core_mtx));
1167
1168 ixv_update_link_status(adapter);
1169
1170 /* Stats Update */
1171 ixv_update_stats(adapter);
1172
1173 /*
1174 ** Check the TX queues status
1175 ** - mark hung queues so we don't schedule on them
1176 ** - watchdog only if all queues show hung
1177 */
1178 for (int i = 0; i < adapter->num_queues; i++, que++) {
1179 /* Keep track of queues with work for soft irq */
1180 if (que->txr->busy)
1181 queues |= ((u64)1 << que->me);
1182 /*
1183 ** Each time txeof runs without cleaning but there
1184 ** are uncleaned descriptors, it increments busy. If
1185 ** we get to the MAX we declare it hung.
1186 */
1187 if (que->busy == IXGBE_QUEUE_HUNG) {
1188 ++hung;
1189 /* Mark the queue as inactive */
1190 adapter->active_queues &= ~((u64)1 << que->me);
1191 continue;
1192 } else {
1193 /* Check if we've come back from hung */
1194 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1195 adapter->active_queues |= ((u64)1 << que->me);
1196 }
1197 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1198 device_printf(dev,"Warning queue %d "
1199 "appears to be hung!\n", i);
1200 que->txr->busy = IXGBE_QUEUE_HUNG;
1201 ++hung;
1202 }
1203
1204 }
1205
1206 /* Only truly watchdog if all queues show hung */
1207 if (hung == adapter->num_queues)
1208 goto watchdog;
1209 else if (queues != 0) { /* Force an IRQ on queues with work */
1210 ixv_rearm_queues(adapter, queues);
1211 }
1212
1213 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1214 return;
1215
1216 watchdog:
1217 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1218 adapter->ifp->if_flags &= ~IFF_RUNNING;
1219 adapter->watchdog_events.ev_count++;
1220 ixv_init_locked(adapter);
1221 }
1222
1223 /*
1224 ** Note: this routine updates the OS on the link state;
1225 ** the real check of the hardware only happens with
1226 ** a link interrupt.
1227 */
1228 static void
1229 ixv_update_link_status(struct adapter *adapter)
1230 {
1231 struct ifnet *ifp = adapter->ifp;
1232 device_t dev = adapter->dev;
1233
1234 if (adapter->link_up){
1235 if (adapter->link_active == FALSE) {
1236 if (bootverbose) {
1237 const char *bpsmsg;
1238
1239 switch (adapter->link_speed) {
1240 case IXGBE_LINK_SPEED_10GB_FULL:
1241 bpsmsg = "10 Gbps";
1242 break;
1243 case IXGBE_LINK_SPEED_1GB_FULL:
1244 bpsmsg = "1 Gbps";
1245 break;
1246 case IXGBE_LINK_SPEED_100_FULL:
1247 bpsmsg = "100 Mbps";
1248 break;
1249 default:
1250 bpsmsg = "unknown speed";
1251 break;
1252 }
1253 device_printf(dev,"Link is up %s %s \n",
1254 bpsmsg, "Full Duplex");
1255 }
1256 adapter->link_active = TRUE;
1257 if_link_state_change(ifp, LINK_STATE_UP);
1258 }
1259 } else { /* Link down */
1260 if (adapter->link_active == TRUE) {
1261 if (bootverbose)
1262 device_printf(dev,"Link is Down\n");
1263 if_link_state_change(ifp, LINK_STATE_DOWN);
1264 adapter->link_active = FALSE;
1265 }
1266 }
1267
1268 return;
1269 }
1270
1271
1272 static void
1273 ixv_ifstop(struct ifnet *ifp, int disable)
1274 {
1275 struct adapter *adapter = ifp->if_softc;
1276
1277 IXGBE_CORE_LOCK(adapter);
1278 ixv_stop(adapter);
1279 IXGBE_CORE_UNLOCK(adapter);
1280 }
1281
1282 /*********************************************************************
1283 *
1284 * This routine disables all traffic on the adapter by issuing a
1285 * global reset on the MAC and deallocates TX/RX buffers.
1286 *
1287 **********************************************************************/
1288
1289 static void
1290 ixv_stop(void *arg)
1291 {
1292 struct ifnet *ifp;
1293 struct adapter *adapter = arg;
1294 struct ixgbe_hw *hw = &adapter->hw;
1295 ifp = adapter->ifp;
1296
1297 KASSERT(mutex_owned(&adapter->core_mtx));
1298
1299 INIT_DEBUGOUT("ixv_stop: begin\n");
1300 ixv_disable_intr(adapter);
1301
1302 /* Tell the stack that the interface is no longer active */
1303 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1304
1305 ixgbe_reset_hw(hw);
1306 adapter->hw.adapter_stopped = FALSE;
1307 ixgbe_stop_adapter(hw);
1308 callout_stop(&adapter->timer);
1309
1310 /* reprogram the RAR[0] in case user changed it. */
1311 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1312
1313 return;
1314 }
1315
1316
1317 /*********************************************************************
1318 *
1319 * Determine hardware revision.
1320 *
1321 **********************************************************************/
1322 static void
1323 ixv_identify_hardware(struct adapter *adapter)
1324 {
1325 pcitag_t tag;
1326 pci_chipset_tag_t pc;
1327 pcireg_t subid, id;
1328 struct ixgbe_hw *hw = &adapter->hw;
1329
1330 pc = adapter->osdep.pc;
1331 tag = adapter->osdep.tag;
1332
1333 /*
1334 ** Make sure BUSMASTER is set; on a VM under
1335 ** KVM it may not be, and that will break things.
1336 */
1337 ixgbe_pci_enable_busmaster(pc, tag);
1338
1339 id = pci_conf_read(pc, tag, PCI_ID_REG);
1340 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1341
1342 /* Save off the information about this board */
1343 hw->vendor_id = PCI_VENDOR(id);
1344 hw->device_id = PCI_PRODUCT(id);
1345 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1346 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1347 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1348
1349 /* We need this to determine device-specific things */
1350 ixgbe_set_mac_type(hw);
1351
1352 /* Set the right number of segments */
1353 adapter->num_segs = IXGBE_82599_SCATTER;
1354
1355 return;
1356 }
1357
1358 /*********************************************************************
1359 *
1360 * Setup MSIX Interrupt resources and handlers
1361 *
1362 **********************************************************************/
1363 static int
1364 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1365 {
1366 device_t dev = adapter->dev;
1367 struct ix_queue *que = adapter->queues;
1368 struct tx_ring *txr = adapter->tx_rings;
1369 int error, rid, vector = 0;
1370 pci_chipset_tag_t pc;
1371 pcitag_t tag;
1372 char intrbuf[PCI_INTRSTR_LEN];
1373 char intr_xname[32];
1374 const char *intrstr = NULL;
1375 kcpuset_t *affinity;
1376 int cpu_id = 0;
1377
1378 pc = adapter->osdep.pc;
1379 tag = adapter->osdep.tag;
1380
1381 adapter->osdep.nintrs = adapter->num_queues + 1;
1382 if (pci_msix_alloc_exact(pa,
1383 &adapter->osdep.intrs, adapter->osdep.nintrs) != 0)
1384 return (ENXIO);
1385
1386 kcpuset_create(&affinity, false);
1387 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1388 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
1389 device_xname(dev), i);
1390 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1391 sizeof(intrbuf));
1392 #ifdef IXV_MPSAFE
1393 	pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1394 true);
1395 #endif
1396 /* Set the handler function */
1397 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
1398 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
1399 intr_xname);
1400 if (que->res == NULL) {
1401 pci_intr_release(pc, adapter->osdep.intrs,
1402 adapter->osdep.nintrs);
1403 aprint_error_dev(dev,
1404 "Failed to register QUE handler");
1405 kcpuset_destroy(affinity);
1406 return (ENXIO);
1407 }
1408 que->msix = vector;
1409 adapter->active_queues |= (u64)(1 << que->msix);
1410
1411 cpu_id = i;
1412 /* Round-robin affinity */
1413 kcpuset_zero(affinity);
1414 kcpuset_set(affinity, cpu_id % ncpu);
1415 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1416 NULL);
1417 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1418 intrstr);
1419 if (error == 0)
1420 aprint_normal(", bound queue %d to cpu %d\n",
1421 i, cpu_id);
1422 else
1423 aprint_normal("\n");
1424
1425 #ifndef IXGBE_LEGACY_TX
1426 txr->txr_si = softint_establish(SOFTINT_NET,
1427 ixgbe_deferred_mq_start, txr);
1428 #endif
1429 que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1430 que);
1431 if (que->que_si == NULL) {
1432 aprint_error_dev(dev,
1433 "could not establish software interrupt\n");
1434 }
1435 }
1436
1437 /* and Mailbox */
1438 cpu_id++;
1439 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
1440 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1441 sizeof(intrbuf));
1442 #ifdef IXV_MPSAFE
1443 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
1444 #endif
1445 /* Set the mbx handler function */
1446 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
1447 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
1448 intr_xname);
1449 if (adapter->osdep.ihs[vector] == NULL) {
1450 adapter->res = NULL;
1451 aprint_error_dev(dev, "Failed to register LINK handler\n");
1452 kcpuset_destroy(affinity);
1453 return (ENXIO);
1454 }
1455 /* Round-robin affinity */
1456 kcpuset_zero(affinity);
1457 kcpuset_set(affinity, cpu_id % ncpu);
1458 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1459
1460 aprint_normal_dev(dev,
1461 "for link, interrupting at %s, ", intrstr);
1462 if (error == 0) {
1463 aprint_normal("affinity to cpu %d\n", cpu_id);
1464 	} else
		aprint_normal("\n");
1465 adapter->vector = vector;
1466 /* Tasklets for Mailbox */
1467 adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1468 adapter);
1469 /*
1470 ** Due to a broken design, QEMU will fail to properly
1471 ** enable the guest for MSIX unless the vectors in
1472 ** the table are all set up, so we must rewrite the
1473 ** ENABLE bit in the MSIX control register again at
1474 ** this point to get it to initialize us successfully.
1475 */
1476 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1477 int msix_ctrl;
1478 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1479 rid += PCI_MSIX_CTL;
1480 msix_ctrl = pci_conf_read(pc, tag, rid);
1481 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1482 pci_conf_write(pc, tag, rid, msix_ctrl);
1483 }
1484
1485 kcpuset_destroy(affinity);
1486 return (0);
1487 }
1488
1489 /*
1490 * Set up MSIX resources; note that the VF
1491 * device MUST use MSIX, there is no fallback.
1492 */
1493 static int
1494 ixv_setup_msix(struct adapter *adapter)
1495 {
1496 device_t dev = adapter->dev;
1497 int want, queues, msgs;
1498
1499 /* Must have at least 2 MSIX vectors */
1500 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1501 if (msgs < 2) {
1502 aprint_error_dev(dev,"MSIX config error\n");
1503 return (ENXIO);
1504 }
1505 msgs = MIN(msgs, IXG_MAX_NINTR);
1506
1507 /* Figure out a reasonable auto config value */
1508 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
1509
1510 if (ixv_num_queues != 0)
1511 queues = ixv_num_queues;
1512 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
1513 queues = IXGBE_VF_MAX_TX_QUEUES;
1514
1515 /*
1516 ** Want vectors for the queues,
1517 ** plus an additional one for the mailbox.
1518 */
1519 want = queues + 1;
1520 if (msgs >= want) {
1521 msgs = want;
1522 } else {
1523 aprint_error_dev(dev,
1524 "MSIX Configuration Problem, "
1525 "%d vectors but %d queues wanted!\n",
1526 msgs, want);
1527 return -1;
1528 }
1529
1530 adapter->msix_mem = (void *)1; /* XXX */
1531 aprint_normal_dev(dev,
1532 "Using MSIX interrupts with %d vectors\n", msgs);
1533 adapter->num_queues = queues;
1534 return (msgs);
1535 }
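
/*
 * Worked example (illustrative): with 8 CPUs but only 3 MSIX messages
 * exposed to the VF, queues = msgs - 1 = 2 and want = queues + 1 = 3,
 * so ixv_setup_msix() reports 3 vectors: one per queue pair plus one
 * for the mailbox interrupt.
 */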
1536
1537
1538 static int
1539 ixv_allocate_pci_resources(struct adapter *adapter,
1540 const struct pci_attach_args *pa)
1541 {
1542 pcireg_t memtype;
1543 device_t dev = adapter->dev;
1544 bus_addr_t addr;
1545 int flags;
1546
1547 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1548
1549 switch (memtype) {
1550 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1551 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1552 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1553 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1554 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1555 goto map_err;
1556 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1557 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1558 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1559 }
1560 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1561 adapter->osdep.mem_size, flags,
1562 &adapter->osdep.mem_bus_space_handle) != 0) {
1563 map_err:
1564 adapter->osdep.mem_size = 0;
1565 aprint_error_dev(dev, "unable to map BAR0\n");
1566 return ENXIO;
1567 }
1568 break;
1569 default:
1570 aprint_error_dev(dev, "unexpected type on BAR0\n");
1571 return ENXIO;
1572 }
1573
1574 /* Pick up the tuneable queues */
1575 adapter->num_queues = ixv_num_queues;
1576 adapter->hw.back = adapter;
1577
1578 /*
1579 ** Now set up MSI/X; this should
1580 ** return the number of
1581 ** configured vectors.
1582 */
1583 adapter->msix = ixv_setup_msix(adapter);
1584 if (adapter->msix == ENXIO)
1585 return (ENXIO);
1586 else
1587 return (0);
1588 }
1589
1590 static void
1591 ixv_free_pci_resources(struct adapter * adapter)
1592 {
1593 struct ix_queue *que = adapter->queues;
1594 int rid;
1595
1596 /*
1597 ** Release all msix queue resources:
1598 */
1599 for (int i = 0; i < adapter->num_queues; i++, que++) {
1600 if (que->res != NULL)
1601 pci_intr_disestablish(adapter->osdep.pc,
1602 adapter->osdep.ihs[i]);
1603 }
1604
1605
1606 /* Clean the Legacy or Link interrupt last */
1607 if (adapter->vector) /* we are doing MSIX */
1608 rid = adapter->vector;
1609 else
1610 rid = 0;
1611
1612 if (adapter->osdep.ihs[rid] != NULL) {
1613 pci_intr_disestablish(adapter->osdep.pc,
1614 adapter->osdep.ihs[rid]);
1615 adapter->osdep.ihs[rid] = NULL;
1616 }
1617
1618 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1619 adapter->osdep.nintrs);
1620
1621 if (adapter->osdep.mem_size != 0) {
1622 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1623 adapter->osdep.mem_bus_space_handle,
1624 adapter->osdep.mem_size);
1625 }
1626
1627 return;
1628 }
1629
1630 /*********************************************************************
1631 *
1632 * Setup networking device structure and register an interface.
1633 *
1634 **********************************************************************/
1635 static void
1636 ixv_setup_interface(device_t dev, struct adapter *adapter)
1637 {
1638 struct ethercom *ec = &adapter->osdep.ec;
1639 struct ifnet *ifp;
1640
1641 INIT_DEBUGOUT("ixv_setup_interface: begin");
1642
1643 ifp = adapter->ifp = &ec->ec_if;
1644 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1645 ifp->if_baudrate = 1000000000;
1646 ifp->if_init = ixv_init;
1647 ifp->if_stop = ixv_ifstop;
1648 ifp->if_softc = adapter;
1649 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1650 ifp->if_ioctl = ixv_ioctl;
1651 #ifndef IXGBE_LEGACY_TX
1652 ifp->if_transmit = ixgbe_mq_start;
1653 #endif
1654 ifp->if_start = ixgbe_start;
1655 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1656 IFQ_SET_READY(&ifp->if_snd);
1657
1658 if_initialize(ifp);
1659 ether_ifattach(ifp, adapter->hw.mac.addr);
1660 #ifndef IXGBE_LEGACY_TX
1661 #if 0 /* We use per TX queue softint */
1662 if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
1663 #endif
1664 #endif
1665 if_register(ifp);
1666 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1667
1668 adapter->max_frame_size =
1669 ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
1670
1671 /*
1672 * Tell the upper layer(s) we support long frames.
1673 */
1674 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1675
1676 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
1677 ifp->if_capenable = 0;
1678
1679 ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
1680 ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
1681 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1682 | ETHERCAP_VLAN_MTU;
1683 ec->ec_capenable = ec->ec_capabilities;
1684
1685 /* Don't enable LRO by default */
1686 ifp->if_capabilities |= IFCAP_LRO;
1687 #if 0
1688 ifp->if_capenable = ifp->if_capabilities;
1689 #endif
1690
1691 /*
1692 ** Don't turn this on by default; if vlans are
1693 ** created on another pseudo device (e.g. lagg)
1694 ** then vlan events are not passed through, breaking
1695 ** operation, but with HW FILTER off it works. If
1696 ** using vlans directly on this driver you can
1697 ** enable this and get full hardware tag filtering.
1698 */
1699 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1700
1701 /*
1702 * Specify the media types supported by this adapter and register
1703 * callbacks to update media and link information
1704 */
1705 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1706 ixv_media_status);
1707 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1708 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1709
1710 return;
1711 }
1712
1713 static void
1714 ixv_config_link(struct adapter *adapter)
1715 {
1716 struct ixgbe_hw *hw = &adapter->hw;
1717
1718 if (hw->mac.ops.check_link)
1719 hw->mac.ops.check_link(hw, &adapter->link_speed,
1720 &adapter->link_up, FALSE);
1721 }
1722
1723
1724 /*********************************************************************
1725 *
1726 * Enable transmit unit.
1727 *
1728 **********************************************************************/
1729 static void
1730 ixv_initialize_transmit_units(struct adapter *adapter)
1731 {
1732 struct tx_ring *txr = adapter->tx_rings;
1733 struct ixgbe_hw *hw = &adapter->hw;
1734
1735
1736 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1737 u64 tdba = txr->txdma.dma_paddr;
1738 u32 txctrl, txdctl;
1739
1740 /* Set WTHRESH to 8, burst writeback */
1741 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1742 txdctl |= (8 << 16);
1743 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1744
1745 /* Set the HW Tx Head and Tail indices */
1746 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1747 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1748
1749 /* Set Tx Tail register */
1750 txr->tail = IXGBE_VFTDT(i);
1751
1752 /* Set Ring parameters */
1753 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1754 (tdba & 0x00000000ffffffffULL));
1755 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1756 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1757 adapter->num_tx_desc *
1758 sizeof(struct ixgbe_legacy_tx_desc));
1759 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1760 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1761 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1762
1763 /* Now enable */
1764 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1765 txdctl |= IXGBE_TXDCTL_ENABLE;
1766 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1767 }
1768
1769 return;
1770 }
1771
1772
1773 /*********************************************************************
1774 *
1775 * Setup receive registers and features.
1776 *
1777 **********************************************************************/
1778 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1779
1780 static void
1781 ixv_initialize_receive_units(struct adapter *adapter)
1782 {
1783 struct rx_ring *rxr = adapter->rx_rings;
1784 struct ixgbe_hw *hw = &adapter->hw;
1785 struct ifnet *ifp = adapter->ifp;
1786 u32 bufsz, rxcsum, psrtype;
1787
1788 if (ifp->if_mtu > ETHERMTU)
1789 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1790 else
1791 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1792
1793 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1794 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1795 IXGBE_PSRTYPE_L2HDR;
1796
1797 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1798
1799 /* Tell PF our max_frame size */
1800 ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
1801
1802 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1803 u64 rdba = rxr->rxdma.dma_paddr;
1804 u32 reg, rxdctl;
1805
1806 /* Disable the queue */
1807 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1808 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1809 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1810 for (int j = 0; j < 10; j++) {
1811 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1812 IXGBE_RXDCTL_ENABLE)
1813 msec_delay(1);
1814 else
1815 break;
1816 }
1817 wmb();
1818 /* Setup the Base and Length of the Rx Descriptor Ring */
1819 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1820 (rdba & 0x00000000ffffffffULL));
1821 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1822 (rdba >> 32));
1823 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1824 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1825
1826 /* Reset the ring indices */
1827 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1829
1830 /* Set up the SRRCTL register */
1831 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1832 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1833 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1834 reg |= bufsz;
1835 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1836 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1837
1838 /* Capture Rx Tail index */
1839 rxr->tail = IXGBE_VFRDT(rxr->me);
1840
1841 /* Do the queue enabling last */
1842 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1843 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1844 for (int k = 0; k < 10; k++) {
1845 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1846 IXGBE_RXDCTL_ENABLE)
1847 break;
1848 else
1849 msec_delay(1);
1850 }
1851 wmb();
1852
1853 /* Set the Tail Pointer */
1854 #ifdef DEV_NETMAP
1855 /*
1856 * In netmap mode, we must preserve the buffers made
1857 * available to userspace before the if_init()
1858 * (this is true by default on the TX side, because
1859 * init makes all buffers available to userspace).
1860 *
1861 * netmap_reset() and the device specific routines
1862 * (e.g. ixgbe_setup_receive_rings()) map these
1863 * buffers at the end of the NIC ring, so here we
1864 * must set the RDT (tail) register to make sure
1865 * they are not overwritten.
1866 *
1867 * In this driver the NIC ring starts at RDH = 0,
1868 * RDT points to the last slot available for reception (?),
1869 * so RDT = num_rx_desc - 1 means the whole ring is available.
1870 */
1871 if (ifp->if_capenable & IFCAP_NETMAP) {
1872 struct netmap_adapter *na = NA(adapter->ifp);
1873 struct netmap_kring *kring = &na->rx_rings[i];
1874 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1875
1876 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1877 } else
1878 #endif /* DEV_NETMAP */
1879 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1880 adapter->num_rx_desc - 1);
1881 }
1882
1883 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1884
1885 if (ifp->if_capenable & IFCAP_RXCSUM)
1886 rxcsum |= IXGBE_RXCSUM_PCSD;
1887
1888 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1889 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1890
1891 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1892
1893 return;
1894 }
1895
1896 static void
1897 ixv_setup_vlan_support(struct adapter *adapter)
1898 {
1899 struct ixgbe_hw *hw = &adapter->hw;
1900 u32 ctrl, vid, vfta, retry;
1901 struct rx_ring *rxr;
1902
	/*
	** We get here through init_locked, meaning a soft
	** reset; that has already cleared the VFTA and
	** other state, so if no VLANs have been registered
	** there is nothing to do.
	*/
1909 if (!VLAN_ATTACHED(&adapter->osdep.ec))
1910 return;
1911
1912 /* Enable the queues */
1913 for (int i = 0; i < adapter->num_queues; i++) {
1914 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1915 ctrl |= IXGBE_RXDCTL_VME;
1916 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1917 /*
1918 * Let Rx path know that it needs to store VLAN tag
1919 * as part of extra mbuf info.
1920 */
1921 rxr = &adapter->rx_rings[i];
1922 rxr->vtag_strip = TRUE;
1923 }
1924
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
1929 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1930 if (ixv_shadow_vfta[i] == 0)
1931 continue;
1932 vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the VLAN IDs from the
		** bits set in each of the shadow
		** VFTA words.
		*/
1938 for (int j = 0; j < 32; j++) {
1939 retry = 0;
1940 if ((vfta & (1 << j)) == 0)
1941 continue;
1942 vid = (i * 32) + j;
1943 /* Call the shared code mailbox routine */
1944 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1945 if (++retry > 5)
1946 break;
1947 }
1948 }
1949 }
1950 }
1951
1952 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
** This routine is run via a vlan config EVENT; it
** lets us use the HW filter table since we can get
** the vlan id.  This just creates the entry in the
** soft version of the VFTA; init will repopulate
** the real table.
*/
1960 static void
1961 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1962 {
1963 struct adapter *adapter = ifp->if_softc;
1964 u16 index, bit;
1965
1966 if (ifp->if_softc != arg) /* Not our event */
1967 return;
1968
1969 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1970 return;
1971
1972 IXGBE_CORE_LOCK(adapter);
1973 index = (vtag >> 5) & 0x7F;
1974 bit = vtag & 0x1F;
1975 ixv_shadow_vfta[index] |= (1 << bit);
1976 /* Re-init to load the changes */
1977 ixv_init_locked(adapter);
1978 IXGBE_CORE_UNLOCK(adapter);
1979 }
1980
/*
** This routine is run via a vlan unconfig
** EVENT; it removes our entry from the
** soft VFTA.
*/
1986 static void
1987 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1988 {
1989 struct adapter *adapter = ifp->if_softc;
1990 u16 index, bit;
1991
1992 if (ifp->if_softc != arg)
1993 return;
1994
1995 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1996 return;
1997
1998 IXGBE_CORE_LOCK(adapter);
1999 index = (vtag >> 5) & 0x7F;
2000 bit = vtag & 0x1F;
2001 ixv_shadow_vfta[index] &= ~(1 << bit);
2002 /* Re-init to load the changes */
2003 ixv_init_locked(adapter);
2004 IXGBE_CORE_UNLOCK(adapter);
2005 }
2006 #endif
2007
2008 static void
2009 ixv_enable_intr(struct adapter *adapter)
2010 {
2011 struct ixgbe_hw *hw = &adapter->hw;
2012 struct ix_queue *que = adapter->queues;
2013 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2014
2016 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
2017
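	/* Enable auto-clear only for the queue interrupt vectors */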
2018 mask = IXGBE_EIMS_ENABLE_MASK;
2019 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
2020 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2021
2022 for (int i = 0; i < adapter->num_queues; i++, que++)
2023 ixv_enable_queue(adapter, que->msix);
2024
2025 IXGBE_WRITE_FLUSH(hw);
2026
2027 return;
2028 }
2029
2030 static void
2031 ixv_disable_intr(struct adapter *adapter)
2032 {
2033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2034 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
2035 IXGBE_WRITE_FLUSH(&adapter->hw);
2036 return;
2037 }
2038
2039 /*
** Set up the correct IVAR register for a particular MSIX interrupt
** - entry is the register array entry
** - vector is the MSIX vector for this queue
** - type is RX (0), TX (1) or MISC (-1)
2044 */
2045 static void
2046 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2047 {
2048 struct ixgbe_hw *hw = &adapter->hw;
2049 u32 ivar, index;
2050
2051 vector |= IXGBE_IVAR_ALLOC_VAL;
2052
2053 if (type == -1) { /* MISC IVAR */
2054 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2055 ivar &= ~0xFF;
2056 ivar |= vector;
2057 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2058 } else { /* RX/TX IVARS */
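		/*
		 * Each VTIVAR register carries the 8-bit vector entries
		 * for two queues: RX of the even queue in bits 7:0, its
		 * TX in bits 15:8, and the odd queue's RX/TX pair in
		 * the upper 16 bits; hence the index math below.
		 */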
2059 index = (16 * (entry & 1)) + (8 * type);
2060 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2061 ivar &= ~(0xFF << index);
2062 ivar |= (vector << index);
2063 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2064 }
2065 }
2066
2067 static void
2068 ixv_configure_ivars(struct adapter *adapter)
2069 {
2070 struct ix_queue *que = adapter->queues;
2071
2072 for (int i = 0; i < adapter->num_queues; i++, que++) {
2073 /* First the RX queue entry */
2074 ixv_set_ivar(adapter, i, que->msix, 0);
2075 /* ... and the TX */
2076 ixv_set_ivar(adapter, i, que->msix, 1);
2077 /* Set an initial value in EITR */
2078 IXGBE_WRITE_REG(&adapter->hw,
2079 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
2080 }
2081
2082 /* For the mailbox interrupt */
2083 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2084 }
2085
/*
** Tasklet handler for MSIX MBX (mailbox) interrupts;
** run outside the interrupt handler proper since the
** mailbox/link check might sleep.
*/
2091 static void
2092 ixv_handle_mbx(void *context)
2093 {
2094 struct adapter *adapter = context;
2095
2096 ixgbe_check_link(&adapter->hw,
2097 &adapter->link_speed, &adapter->link_up, 0);
2098 ixv_update_link_status(adapter);
2099 }
2100
2101 /*
2102 ** The VF stats registers never have a truly virgin
2103 ** starting point, so this routine tries to make an
2104 ** artificial one, marking ground zero on attach as
2105 ** it were.
2106 */
2107 static void
2108 ixv_save_stats(struct adapter *adapter)
2109 {
2110 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2111
2112 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2113 stats->saved_reset_vfgprc +=
2114 stats->vfgprc.ev_count - stats->base_vfgprc;
2115 stats->saved_reset_vfgptc +=
2116 stats->vfgptc.ev_count - stats->base_vfgptc;
2117 stats->saved_reset_vfgorc +=
2118 stats->vfgorc.ev_count - stats->base_vfgorc;
2119 stats->saved_reset_vfgotc +=
2120 stats->vfgotc.ev_count - stats->base_vfgotc;
2121 stats->saved_reset_vfmprc +=
2122 stats->vfmprc.ev_count - stats->base_vfmprc;
2123 }
2124 }
2125
2126 static void
2127 ixv_init_stats(struct adapter *adapter)
2128 {
2129 struct ixgbe_hw *hw = &adapter->hw;
2130
2131 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2132 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2133 adapter->stats.vf.last_vfgorc |=
2134 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2135
2136 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2137 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2138 adapter->stats.vf.last_vfgotc |=
2139 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2140
2141 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2142
2143 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2144 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2145 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2146 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2147 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2148 }
2149
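/*
** The VF statistics registers are only 32 bits wide (36 bits for the
** octet counters, split across LSB/MSB register pairs), so they wrap
** fairly quickly.  These helpers fold the current hardware value into
** the 64-bit event counters, adding one full register span whenever
** the value is seen to have rolled over since the previous read.
*/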
2150 #define UPDATE_STAT_32(reg, last, count) \
2151 { \
2152 u32 current = IXGBE_READ_REG(hw, reg); \
2153 if (current < last) \
2154 count.ev_count += 0x100000000LL; \
2155 last = current; \
2156 count.ev_count &= 0xFFFFFFFF00000000LL; \
2157 count.ev_count |= current; \
2158 }
2159
2160 #define UPDATE_STAT_36(lsb, msb, last, count) \
2161 { \
2162 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2163 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2164 u64 current = ((cur_msb << 32) | cur_lsb); \
2165 if (current < last) \
2166 count.ev_count += 0x1000000000LL; \
2167 last = current; \
2168 count.ev_count &= 0xFFFFFFF000000000LL; \
2169 count.ev_count |= current; \
2170 }
2171
2172 /*
2173 ** ixv_update_stats - Update the board statistics counters.
2174 */
2175 void
2176 ixv_update_stats(struct adapter *adapter)
2177 {
2178 struct ixgbe_hw *hw = &adapter->hw;
2179
2180 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2181 adapter->stats.vf.vfgprc);
2182 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2183 adapter->stats.vf.vfgptc);
2184 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2185 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2186 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2187 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2188 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2189 adapter->stats.vf.vfmprc);
2190 }
2191
2192 /*
2193 * Add statistic sysctls for the VF.
2194 */
2195 static void
2196 ixv_add_stats_sysctls(struct adapter *adapter)
2197 {
2198 device_t dev = adapter->dev;
2199 struct ix_queue *que = &adapter->queues[0];
2200 struct tx_ring *txr = que->txr;
2201 struct rx_ring *rxr = que->rxr;
2202
2203 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2204
2205 const char *xname = device_xname(dev);
2206
2207 /* Driver Statistics */
2208 evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
2209 NULL, xname, "Driver dropped packets");
2210 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2211 NULL, xname, "m_defrag() failed");
2212 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2213 NULL, xname, "Watchdog timeouts");
2214
2215 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2216 xname, "Good Packets Received");
2217 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2218 xname, "Good Octets Received");
2219 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2220 xname, "Multicast Packets Received");
2221 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2222 xname, "Good Packets Transmitted");
2223 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2224 xname, "Good Octets Transmitted");
2225 evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
2226 xname, "IRQs on queue");
2227 evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
2228 xname, "RX irqs on queue");
2229 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
2230 xname, "RX packets");
2231 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
2232 xname, "RX bytes");
2233 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
2234 xname, "Discarded RX packets");
2235 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
2236 xname, "TX Packets");
2237 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
2238 xname, "# of times not enough descriptors were available during TX");
2239 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
2240 xname, "TX TSO");
2241 }
2242
2243 static void
2244 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2245 const char *description, int *limit, int value)
2246 {
2247 device_t dev = adapter->dev;
2248 struct sysctllog **log;
2249 const struct sysctlnode *rnode, *cnode;
2250
2251 log = &adapter->sysctllog;
2252 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2253 aprint_error_dev(dev, "could not create sysctl root\n");
2254 return;
2255 }
2256 if (sysctl_createv(log, 0, &rnode, &cnode,
2257 CTLFLAG_READWRITE, CTLTYPE_INT,
2258 name, SYSCTL_DESCR(description),
2259 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2260 aprint_error_dev(dev, "could not create sysctl\n");
2261 *limit = value;
2262 }
2263
2264 /**********************************************************************
2265 *
 *  This routine is called only when the debug sysctl is set
 *  (see ixv_sysctl_debug).  It provides a way to take a look
 *  at important statistics maintained by the driver and hardware.
2269 *
2270 **********************************************************************/
2271 static void
2272 ixv_print_debug_info(struct adapter *adapter)
2273 {
2274 device_t dev = adapter->dev;
2275 struct ixgbe_hw *hw = &adapter->hw;
2276 struct ix_queue *que = adapter->queues;
2277 struct rx_ring *rxr;
2278 struct tx_ring *txr;
2279 #ifdef LRO
2280 struct lro_ctrl *lro;
2281 #endif /* LRO */
2282
	device_printf(dev,"Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
2285
2286 for (int i = 0; i < adapter->num_queues; i++, que++) {
2287 txr = que->txr;
2288 rxr = que->rxr;
2289 #ifdef LRO
2290 lro = &rxr->lro;
2291 #endif /* LRO */
		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (unsigned long)que->irqs.ev_count);
2294 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2295 rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
2298 #ifdef LRO
2299 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
2300 rxr->me, (long long)lro->lro_queued);
2301 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
2302 rxr->me, (long long)lro->lro_flushed);
2303 #endif /* LRO */
		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
		    txr->me, (unsigned long)txr->total_packets.ev_count);
		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
2308 }
2309
	device_printf(dev,"MBX IRQ Handled: %lu\n",
	    (unsigned long)adapter->link_irq.ev_count);
2312 return;
2313 }
2314
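/*
** Sysctl handler that triggers the debug dump above: writing 1 to the
** node (created elsewhere in the driver; e.g. something like
** "sysctl -w hw.ixv0.debug=1", node name assumed) calls
** ixv_print_debug_info().
*/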
2315 static int
2316 ixv_sysctl_debug(SYSCTLFN_ARGS)
2317 {
2318 struct sysctlnode node;
2319 int error, result;
2320 struct adapter *adapter;
2321
2322 node = *rnode;
2323 adapter = (struct adapter *)node.sysctl_data;
2324 node.sysctl_data = &result;
2325 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2326
2327 if (error)
2328 return error;
2329
2330 if (result == 1)
2331 ixv_print_debug_info(adapter);
2332
2333 return 0;
2334 }
2335
2336 const struct sysctlnode *
2337 ixv_sysctl_instance(struct adapter *adapter)
2338 {
2339 const char *dvname;
2340 struct sysctllog **log;
2341 int rc;
2342 const struct sysctlnode *rnode;
2343
2344 log = &adapter->sysctllog;
2345 dvname = device_xname(adapter->dev);
2346
2347 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2348 0, CTLTYPE_NODE, dvname,
2349 SYSCTL_DESCR("ixv information and settings"),
2350 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2351 goto err;
2352
2353 return rnode;
2354 err:
2355 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2356 return NULL;
2357 }
2358