1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
34 /*$NetBSD: ixv.c,v 1.47 2017/02/08 09:00:37 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
42 /*********************************************************************
43 * Driver version
44 *********************************************************************/
45 char ixv_driver_version[] = "1.4.6-k";
46
47 /*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
58 {
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 /* required last entry */
64 {0, 0, 0, 0, 0}
65 };
66
67 /*********************************************************************
68 * Table of branding strings
69 *********************************************************************/
70
71 static const char *ixv_strings[] = {
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 };
74
75 /*********************************************************************
76 * Function prototypes
77 *********************************************************************/
78 static int ixv_probe(device_t, cfdata_t, void *);
79 static void ixv_attach(device_t, device_t, void *);
80 static int ixv_detach(device_t, int);
81 #if 0
82 static int ixv_shutdown(device_t);
83 #endif
84 static int ixv_ioctl(struct ifnet *, u_long, void *);
85 static int ixv_init(struct ifnet *);
86 static void ixv_init_locked(struct adapter *);
87 static void ixv_stop(void *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
90 static void ixv_identify_hardware(struct adapter *);
91 static int ixv_allocate_pci_resources(struct adapter *,
92 const struct pci_attach_args *);
93 static int ixv_allocate_msix(struct adapter *,
94 const struct pci_attach_args *);
95 static int ixv_setup_msix(struct adapter *);
96 static void ixv_free_pci_resources(struct adapter *);
97 static void ixv_local_timer(void *);
98 static void ixv_local_timer_locked(void *);
99 static void ixv_setup_interface(device_t, struct adapter *);
100 static void ixv_config_link(struct adapter *);
101
102 static void ixv_initialize_transmit_units(struct adapter *);
103 static void ixv_initialize_receive_units(struct adapter *);
104
105 static void ixv_enable_intr(struct adapter *);
106 static void ixv_disable_intr(struct adapter *);
107 static void ixv_set_multi(struct adapter *);
108 static void ixv_update_link_status(struct adapter *);
109 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
110 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
111 static void ixv_configure_ivars(struct adapter *);
112 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
113
114 static void ixv_setup_vlan_support(struct adapter *);
115 #if 0
116 static void ixv_register_vlan(void *, struct ifnet *, u16);
117 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
118 #endif
119
120 static void ixv_save_stats(struct adapter *);
121 static void ixv_init_stats(struct adapter *);
122 static void ixv_update_stats(struct adapter *);
123 static void ixv_add_stats_sysctls(struct adapter *);
124 static void ixv_set_sysctl_value(struct adapter *, const char *,
125 const char *, int *, int);
126
127 /* The MSI/X Interrupt handlers */
128 static int ixv_msix_que(void *);
129 static int ixv_msix_mbx(void *);
130
131 /* Deferred interrupt tasklets */
132 static void ixv_handle_que(void *);
133 static void ixv_handle_mbx(void *);
134
135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
137
138 #ifdef DEV_NETMAP
139 /*
140 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
141 * if_ix.c.
142 */
143 extern void ixgbe_netmap_attach(struct adapter *adapter);
144
145 #include <net/netmap.h>
146 #include <sys/selinfo.h>
147 #include <dev/netmap/netmap_kern.h>
148 #endif /* DEV_NETMAP */
149
150 /*********************************************************************
151 * Device Interface Entry Points
152 *********************************************************************/
153
154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
155 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
156 DVF_DETACH_SHUTDOWN);
157
158 #if 0
159 static device_method_t ixv_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, ixv_probe),
162 DEVMETHOD(device_attach, ixv_attach),
163 DEVMETHOD(device_detach, ixv_detach),
164 DEVMETHOD(device_shutdown, ixv_shutdown),
165 DEVMETHOD_END
166 };
167 #endif
168
169 #if 0
170 static driver_t ixv_driver = {
171 "ixv", ixv_methods, sizeof(struct adapter),
172 };
173
174 devclass_t ixv_devclass;
175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
178 #ifdef DEV_NETMAP
179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
180 #endif /* DEV_NETMAP */
181 /* XXX depend on 'ix' ? */
182 #endif
183
184 /*
185 ** TUNEABLE PARAMETERS:
186 */
187
188 /* Number of Queues - do not exceed MSIX vectors - 1 */
189 static int ixv_num_queues = 0;
190 #define TUNABLE_INT(__x, __y)
191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
192
193 /*
194 ** AIM: Adaptive Interrupt Moderation
195 ** which means that the interrupt rate
196 ** is varied over time based on the
197 ** traffic for that interrupt vector
198 */
199 static int ixv_enable_aim = FALSE;
200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
201
202 /* How many packets rxeof tries to clean at a time */
203 static int ixv_rx_process_limit = 256;
204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
205
206 /* How many packets txeof tries to clean at a time */
207 static int ixv_tx_process_limit = 256;
208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
209
210 /*
211 ** Number of TX descriptors per ring,
212 ** set higher than RX as this seems to be
213 ** the better-performing choice.
214 */
215 static int ixv_txd = DEFAULT_TXD;
216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
217
218 /* Number of RX descriptors per ring */
219 static int ixv_rxd = DEFAULT_RXD;
220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
221
222 /*
223 ** Shadow VFTA table; this is needed because
224 ** the real filter table gets cleared during
225 ** a soft reset and we need to repopulate it.
226 */
227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
228
229 /*********************************************************************
230 * Device identification routine
231 *
232 * ixv_probe determines if the driver should be loaded on
233 * an adapter, based on the PCI vendor/device ID of the adapter.
234 *
235 * return 1 on a match, 0 otherwise
236 *********************************************************************/
237
238 static int
239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
240 {
241 #ifdef __HAVE_PCI_MSI_MSIX
242 const struct pci_attach_args *pa = aux;
243
244 return (ixv_lookup(pa) != NULL) ? 1 : 0;
245 #else
246 return 0;
247 #endif
248 }
249
250 static ixgbe_vendor_info_t *
251 ixv_lookup(const struct pci_attach_args *pa)
252 {
253 pcireg_t subid;
254 ixgbe_vendor_info_t *ent;
255
256 INIT_DEBUGOUT("ixv_lookup: begin");
257
258 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
259 return NULL;
260
261 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
262
263 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
264 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
265 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
266
267 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
269
270 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
272 return ent;
273 }
274 }
275 return NULL;
276 }
277
278
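/*
 * ixv_sysctl_attach - create this device's private sysctl nodes
 * ("debug" and "enable_aim") under the driver's sysctl subtree.
 */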
279 static void
280 ixv_sysctl_attach(struct adapter *adapter)
281 {
282 struct sysctllog **log;
283 const struct sysctlnode *rnode, *cnode;
284 device_t dev;
285
286 dev = adapter->dev;
287 log = &adapter->sysctllog;
288
289 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
290 aprint_error_dev(dev, "could not create sysctl root\n");
291 return;
292 }
293
294 if (sysctl_createv(log, 0, &rnode, &cnode,
295 CTLFLAG_READWRITE, CTLTYPE_INT,
296 "debug", SYSCTL_DESCR("Debug Info"),
297 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
298 aprint_error_dev(dev, "could not create sysctl\n");
299
300 /* XXX This is an *instance* sysctl controlling a *global* variable.
301 * XXX It's that way in the FreeBSD driver that this derives from.
302 */
303 if (sysctl_createv(log, 0, &rnode, &cnode,
304 CTLFLAG_READWRITE, CTLTYPE_INT,
305 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
306 NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
307 aprint_error_dev(dev, "could not create sysctl\n");
308 }
309
310 /*********************************************************************
311 * Device initialization routine
312 *
313 * The attach entry point is called when the driver is being loaded.
314 * This routine identifies the type of hardware, allocates all resources
315 * and initializes the hardware.
316 *
317 * (no return value; attach failures are reported via aprint_error)
318 *********************************************************************/
319
320 static void
321 ixv_attach(device_t parent, device_t dev, void *aux)
322 {
323 struct adapter *adapter;
324 struct ixgbe_hw *hw;
325 int error = 0;
326 ixgbe_vendor_info_t *ent;
327 const struct pci_attach_args *pa = aux;
328
329 INIT_DEBUGOUT("ixv_attach: begin");
330
331 /* Allocate, clear, and link in our adapter structure */
332 adapter = device_private(dev);
333 adapter->dev = dev;
334 hw = &adapter->hw;
335
336 #ifdef DEV_NETMAP
337 adapter->init_locked = ixv_init_locked;
338 adapter->stop_locked = ixv_stop;
339 #endif
340
341 adapter->osdep.pc = pa->pa_pc;
342 adapter->osdep.tag = pa->pa_tag;
343 if (pci_dma64_available(pa))
344 adapter->osdep.dmat = pa->pa_dmat64;
345 else
346 adapter->osdep.dmat = pa->pa_dmat;
347 adapter->osdep.attached = false;
348
349 ent = ixv_lookup(pa);
350
351 KASSERT(ent != NULL);
352
353 aprint_normal(": %s, Version - %s\n",
354 ixv_strings[ent->index], ixv_driver_version);
355
356 /* Core Lock Init*/
357 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
358
359 /* SYSCTL APIs */
360 ixv_sysctl_attach(adapter);
361
362 /* Set up the timer callout */
363 callout_init(&adapter->timer, 0);
364
365 /* Determine hardware revision */
366 ixv_identify_hardware(adapter);
367
368 /* Do base PCI setup - map BAR0 */
369 if (ixv_allocate_pci_resources(adapter, pa)) {
370 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
371 error = ENXIO;
372 goto err_out;
373 }
374
375 /* Sysctls for limiting the amount of work done in the taskqueues */
376 ixv_set_sysctl_value(adapter, "rx_processing_limit",
377 "max number of rx packets to process",
378 &adapter->rx_process_limit, ixv_rx_process_limit);
379
380 ixv_set_sysctl_value(adapter, "tx_processing_limit",
381 "max number of tx packets to process",
382 &adapter->tx_process_limit, ixv_tx_process_limit);
383
384 /* Do descriptor calc and sanity checks */
385 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
386 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
387 aprint_error_dev(dev, "TXD config issue, using default!\n");
388 adapter->num_tx_desc = DEFAULT_TXD;
389 } else
390 adapter->num_tx_desc = ixv_txd;
391
392 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
393 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
394 aprint_error_dev(dev, "RXD config issue, using default!\n");
395 adapter->num_rx_desc = DEFAULT_RXD;
396 } else
397 adapter->num_rx_desc = ixv_rxd;
398
399 /* Allocate our TX/RX Queues */
400 if (ixgbe_allocate_queues(adapter)) {
401 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
402 error = ENOMEM;
403 goto err_out;
404 }
405
406 /*
407 ** Initialize the shared code; it's
408 ** at this point that the mac type is set.
409 */
410 error = ixgbe_init_shared_code(hw);
411 if (error) {
412 aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
413 error = EIO;
414 goto err_late;
415 }
416
417 /* Setup the mailbox */
418 ixgbe_init_mbx_params_vf(hw);
419
420 /* Reset mbox api to 1.0 */
421 error = ixgbe_reset_hw(hw);
422 if (error == IXGBE_ERR_RESET_FAILED)
423 aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
424 else if (error)
425 aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
426 if (error) {
427 error = EIO;
428 goto err_late;
429 }
430
431 /* Negotiate mailbox API version */
432 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
433 if (error)
434 aprint_debug_dev(dev,
435 "MBX API 1.1 negotiation failed! Error %d\n", error);
436
437 error = ixgbe_init_hw(hw);
438 if (error) {
439 aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
440 error = EIO;
441 goto err_late;
442 }
443
444 error = ixv_allocate_msix(adapter, pa);
445 if (error) {
446 device_printf(dev, "ixv_allocate_msix() failed!\n");
447 goto err_late;
448 }
449
450 /* If no mac address was assigned, make a random one */
451 if (!ixv_check_ether_addr(hw->mac.addr)) {
452 u8 addr[ETHER_ADDR_LEN];
453 uint64_t rndval = cprng_fast64();
454
455 memcpy(addr, &rndval, sizeof(addr));
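		/* Clear the multicast bit and set the locally-administered bit */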
456 addr[0] &= 0xFE;
457 addr[0] |= 0x02;
458 bcopy(addr, hw->mac.addr, sizeof(addr));
459 }
460
461 /* Setup OS specific network interface */
462 ixv_setup_interface(dev, adapter);
463
464 /* Do the stats setup */
465 ixv_save_stats(adapter);
466 ixv_init_stats(adapter);
467 ixv_add_stats_sysctls(adapter);
468
469 /* Register for VLAN events */
470 #if 0 /* XXX delete after write? */
471 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
472 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
473 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
474 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
475 #endif
476
477 #ifdef DEV_NETMAP
478 ixgbe_netmap_attach(adapter);
479 #endif /* DEV_NETMAP */
480 INIT_DEBUGOUT("ixv_attach: end");
481 adapter->osdep.attached = true;
482 return;
483
484 err_late:
485 ixgbe_free_transmit_structures(adapter);
486 ixgbe_free_receive_structures(adapter);
487 err_out:
488 ixv_free_pci_resources(adapter);
489 return;
490
491 }
492
493 /*********************************************************************
494 * Device removal routine
495 *
496 * The detach entry point is called when the driver is being removed.
497 * This routine stops the adapter and deallocates all the resources
498 * that were allocated for driver operation.
499 *
500 * return 0 on success, positive on failure
501 *********************************************************************/
502
503 static int
504 ixv_detach(device_t dev, int flags)
505 {
506 struct adapter *adapter = device_private(dev);
507 struct ix_queue *que = adapter->queues;
508 struct tx_ring *txr = adapter->tx_rings;
509
510 INIT_DEBUGOUT("ixv_detach: begin");
511 if (adapter->osdep.attached == false)
512 return 0;
513
514 #if NVLAN > 0
515 /* Make sure VLANS are not using driver */
516 if (!VLAN_ATTACHED(&adapter->osdep.ec))
517 ; /* nothing to do: no VLANs */
518 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
519 vlan_ifdetach(adapter->ifp);
520 else {
521 aprint_error_dev(dev, "VLANs in use, detach first\n");
522 return EBUSY;
523 }
524 #endif
525
526 IXGBE_CORE_LOCK(adapter);
527 ixv_stop(adapter);
528 IXGBE_CORE_UNLOCK(adapter);
529
530 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
531 #ifndef IXGBE_LEGACY_TX
532 softint_disestablish(txr->txr_si);
533 #endif
534 softint_disestablish(que->que_si);
535 }
536
537 /* Drain the Mailbox(link) queue */
538 softint_disestablish(adapter->link_si);
539
540 /* Unregister VLAN events */
541 #if 0 /* XXX msaitoh delete after write? */
542 if (adapter->vlan_attach != NULL)
543 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
544 if (adapter->vlan_detach != NULL)
545 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
546 #endif
547
548 ether_ifdetach(adapter->ifp);
549 callout_halt(&adapter->timer, NULL);
550 #ifdef DEV_NETMAP
551 netmap_detach(adapter->ifp);
552 #endif /* DEV_NETMAP */
553 ixv_free_pci_resources(adapter);
554 #if 0 /* XXX the NetBSD port is probably missing something here */
555 bus_generic_detach(dev);
556 #endif
557 if_detach(adapter->ifp);
558
559 sysctl_teardown(&adapter->sysctllog);
560
561 ixgbe_free_transmit_structures(adapter);
562 ixgbe_free_receive_structures(adapter);
563
564 IXGBE_CORE_LOCK_DESTROY(adapter);
565 return (0);
566 }
567
568 /*********************************************************************
569 *
570 * Shutdown entry point
571 *
572 **********************************************************************/
573 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
574 static int
575 ixv_shutdown(device_t dev)
576 {
577 struct adapter *adapter = device_private(dev);
578 IXGBE_CORE_LOCK(adapter);
579 ixv_stop(adapter);
580 IXGBE_CORE_UNLOCK(adapter);
581 return (0);
582 }
583 #endif
584
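/*
 * ixv_ifflags_cb - ethercom callback invoked when the interface flags
 * change; returns ENETRESET when a flag other than IFF_CANTCHANGE or
 * IFF_DEBUG changed, so that the interface gets reinitialized.
 */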
585 static int
586 ixv_ifflags_cb(struct ethercom *ec)
587 {
588 struct ifnet *ifp = &ec->ec_if;
589 struct adapter *adapter = ifp->if_softc;
590 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
591
592 IXGBE_CORE_LOCK(adapter);
593
594 if (change != 0)
595 adapter->if_flags = ifp->if_flags;
596
597 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
598 rc = ENETRESET;
599
600 IXGBE_CORE_UNLOCK(adapter);
601
602 return rc;
603 }
604
605 /*********************************************************************
606 * Ioctl entry point
607 *
608 * ixv_ioctl is called when the user wants to configure the
609 * interface.
610 *
611 * return 0 on success, positive on failure
612 **********************************************************************/
613
614 static int
615 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
616 {
617 struct adapter *adapter = ifp->if_softc;
618 struct ifcapreq *ifcr = data;
619 struct ifreq *ifr = (struct ifreq *) data;
620 int error = 0;
621 int l4csum_en;
622 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
623 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
624
625 switch (command) {
626 case SIOCSIFFLAGS:
627 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
628 break;
629 case SIOCADDMULTI:
630 case SIOCDELMULTI:
631 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
632 break;
633 case SIOCSIFMEDIA:
634 case SIOCGIFMEDIA:
635 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
636 break;
637 case SIOCSIFCAP:
638 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
639 break;
640 case SIOCSIFMTU:
641 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
642 break;
643 default:
644 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
645 break;
646 }
647
648 switch (command) {
649 case SIOCSIFMEDIA:
650 case SIOCGIFMEDIA:
651 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
652 case SIOCSIFCAP:
653 /* Layer-4 Rx checksum offload has to be turned on and
654 * off as a unit.
655 */
656 l4csum_en = ifcr->ifcr_capenable & l4csum;
657 if (l4csum_en != l4csum && l4csum_en != 0)
658 return EINVAL;
659 /*FALLTHROUGH*/
660 case SIOCADDMULTI:
661 case SIOCDELMULTI:
662 case SIOCSIFFLAGS:
663 case SIOCSIFMTU:
664 default:
665 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
666 return error;
667 if ((ifp->if_flags & IFF_RUNNING) == 0)
668 ;
669 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
670 IXGBE_CORE_LOCK(adapter);
671 ixv_init_locked(adapter);
672 IXGBE_CORE_UNLOCK(adapter);
673 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
674 /*
675 * Multicast list has changed; set the hardware filter
676 * accordingly.
677 */
678 IXGBE_CORE_LOCK(adapter);
679 ixv_disable_intr(adapter);
680 ixv_set_multi(adapter);
681 ixv_enable_intr(adapter);
682 IXGBE_CORE_UNLOCK(adapter);
683 }
684 return 0;
685 }
686 }
687
688 /*********************************************************************
689 * Init entry point
690 *
691 * This routine is used in two ways. It is used by the stack as
692 * the init entry point in the network interface structure. It is also used
693 * by the driver as a hw/sw initialization routine to get to a
694 * consistent state.
695 *
696 * return 0 on success, positive on failure
697 **********************************************************************/
698 #define IXGBE_MHADD_MFS_SHIFT 16
699
700 static void
701 ixv_init_locked(struct adapter *adapter)
702 {
703 struct ifnet *ifp = adapter->ifp;
704 device_t dev = adapter->dev;
705 struct ixgbe_hw *hw = &adapter->hw;
706 int error = 0;
707
708 INIT_DEBUGOUT("ixv_init_locked: begin");
709 KASSERT(mutex_owned(&adapter->core_mtx));
710 hw->adapter_stopped = FALSE;
711 ixgbe_stop_adapter(hw);
712 callout_stop(&adapter->timer);
713
714 /* reprogram the RAR[0] in case user changed it. */
715 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
716
717 /* Get the latest mac address, User can use a LAA */
718 memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
719 IXGBE_ETH_LENGTH_OF_ADDRESS);
720 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
721 hw->addr_ctrl.rar_used_count = 1;
722
723 /* Prepare transmit descriptors and buffers */
724 if (ixgbe_setup_transmit_structures(adapter)) {
725 aprint_error_dev(dev, "Could not setup transmit structures\n");
726 ixv_stop(adapter);
727 return;
728 }
729
730 /* Reset VF and renegotiate mailbox API version */
731 ixgbe_reset_hw(hw);
732 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
733 if (error)
734 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
735
736 ixv_initialize_transmit_units(adapter);
737
738 /* Setup Multicast table */
739 ixv_set_multi(adapter);
740
741 /*
742 ** Determine the correct mbuf pool
743 ** for doing jumbo/headersplit
744 */
745 if (ifp->if_mtu > ETHERMTU)
746 adapter->rx_mbuf_sz = MJUMPAGESIZE;
747 else
748 adapter->rx_mbuf_sz = MCLBYTES;
749
750 /* Prepare receive descriptors and buffers */
751 if (ixgbe_setup_receive_structures(adapter)) {
752 device_printf(dev, "Could not setup receive structures\n");
753 ixv_stop(adapter);
754 return;
755 }
756
757 /* Configure RX settings */
758 ixv_initialize_receive_units(adapter);
759
760 #if 0 /* XXX isn't it required? -- msaitoh */
761 /* Set the various hardware offload abilities */
762 ifp->if_hwassist = 0;
763 if (ifp->if_capenable & IFCAP_TSO4)
764 ifp->if_hwassist |= CSUM_TSO;
765 if (ifp->if_capenable & IFCAP_TXCSUM) {
766 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
767 #if __FreeBSD_version >= 800000
768 ifp->if_hwassist |= CSUM_SCTP;
769 #endif
770 }
771 #endif
772
773 /* Set up VLAN offload and filter */
774 ixv_setup_vlan_support(adapter);
775
776 /* Set up MSI/X routing */
777 ixv_configure_ivars(adapter);
778
779 /* Set up auto-mask */
780 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
781
782 /* Set moderation on the Link interrupt */
783 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
784
785 /* Stats init */
786 ixv_init_stats(adapter);
787
788 /* Config/Enable Link */
789 ixv_config_link(adapter);
790 hw->mac.get_link_status = TRUE;
791
792 /* Start watchdog */
793 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
794
795 /* And now turn on interrupts */
796 ixv_enable_intr(adapter);
797
798 /* Now inform the stack we're ready */
799 ifp->if_flags |= IFF_RUNNING;
800 ifp->if_flags &= ~IFF_OACTIVE;
801
802 return;
803 }
804
805 static int
806 ixv_init(struct ifnet *ifp)
807 {
808 struct adapter *adapter = ifp->if_softc;
809
810 IXGBE_CORE_LOCK(adapter);
811 ixv_init_locked(adapter);
812 IXGBE_CORE_UNLOCK(adapter);
813 return 0;
814 }
815
816
817 /*
818 **
819 ** MSIX Interrupt Handlers and Tasklets
820 **
821 */
822
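/*
 * ixv_enable_queue/ixv_disable_queue set or clear a queue's bit in the
 * VF interrupt mask via VTEIMS/VTEIMC; ixv_rearm_queues posts a software-
 * triggered interrupt for the given queues via VTEICS.
 */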
823 static inline void
824 ixv_enable_queue(struct adapter *adapter, u32 vector)
825 {
826 struct ixgbe_hw *hw = &adapter->hw;
827 u32 queue = 1 << vector;
828 u32 mask;
829
830 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
831 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
832 }
833
834 static inline void
835 ixv_disable_queue(struct adapter *adapter, u32 vector)
836 {
837 struct ixgbe_hw *hw = &adapter->hw;
838 u64 queue = (u64)(1 << vector);
839 u32 mask;
840
841 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
842 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
843 }
844
845 static inline void
846 ixv_rearm_queues(struct adapter *adapter, u64 queues)
847 {
848 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
849 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
850 }
851
852
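/*
 * ixv_handle_que - deferred (softint) queue handler: finishes the RX/TX
 * cleanup started in ixv_msix_que, restarts transmission if packets are
 * queued, and re-enables the queue interrupt when no more work remains.
 */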
853 static void
854 ixv_handle_que(void *context)
855 {
856 struct ix_queue *que = context;
857 struct adapter *adapter = que->adapter;
858 struct tx_ring *txr = que->txr;
859 struct ifnet *ifp = adapter->ifp;
860 bool more;
861
862 if (ifp->if_flags & IFF_RUNNING) {
863 more = ixgbe_rxeof(que);
864 IXGBE_TX_LOCK(txr);
865 ixgbe_txeof(txr);
866 #ifndef IXGBE_LEGACY_TX
867 if (pcq_peek(txr->txr_interq) != NULL)
868 ixgbe_mq_start_locked(ifp, txr);
869 #else
870 if (!IFQ_IS_EMPTY(&ifp->if_snd))
871 ixgbe_start_locked(txr, ifp);
872 #endif
873 IXGBE_TX_UNLOCK(txr);
874 if (more) {
875 adapter->req.ev_count++;
876 softint_schedule(que->que_si);
877 return;
878 }
879 }
880
881 /* Reenable this interrupt */
882 ixv_enable_queue(adapter, que->msix);
883 return;
884 }
885
886 /*********************************************************************
887 *
888 * MSIX Queue Interrupt Service routine
889 *
890 **********************************************************************/
891 static int
892 ixv_msix_que(void *arg)
893 {
894 struct ix_queue *que = arg;
895 struct adapter *adapter = que->adapter;
896 struct ifnet *ifp = adapter->ifp;
897 struct tx_ring *txr = que->txr;
898 struct rx_ring *rxr = que->rxr;
899 bool more;
900 u32 newitr = 0;
901
902 ixv_disable_queue(adapter, que->msix);
903 ++que->irqs.ev_count;
904
905 #ifdef __NetBSD__
906 /* Don't run ixgbe_rxeof in interrupt context */
907 more = true;
908 #else
909 more = ixgbe_rxeof(que);
910 #endif
911
912 IXGBE_TX_LOCK(txr);
913 ixgbe_txeof(txr);
914 /*
915 ** Make certain that if the stack
916 ** has anything queued the task gets
917 ** scheduled to handle it.
918 */
919 #ifdef IXGBE_LEGACY_TX
920 if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
921 ixgbe_start_locked(txr, ifp);
922 #else
923 if (pcq_peek(txr->txr_interq) != NULL)
924 ixgbe_mq_start_locked(ifp, txr);
925 #endif
926 IXGBE_TX_UNLOCK(txr);
927
928 /* Do AIM now? */
929
930 if (ixv_enable_aim == FALSE)
931 goto no_calc;
932 /*
933 ** Do Adaptive Interrupt Moderation:
934 ** - Write out last calculated setting
935 ** - Calculate based on average size over
936 ** the last interval.
937 */
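	/*
	 * Illustrative example of the calculation below: with an average
	 * packet size of 800 bytes, newitr = 800 + 24 = 824, which falls in
	 * the mid range, so 824 / 3 = 274 is saved (duplicated into the high
	 * 16 bits) and written to VTEITR on the next interrupt.
	 */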
938 if (que->eitr_setting)
939 IXGBE_WRITE_REG(&adapter->hw,
940 IXGBE_VTEITR(que->msix),
941 que->eitr_setting);
942
943 que->eitr_setting = 0;
944
945 /* Idle, do nothing */
946 if ((txr->bytes == 0) && (rxr->bytes == 0))
947 goto no_calc;
948
949 if ((txr->bytes) && (txr->packets))
950 newitr = txr->bytes/txr->packets;
951 if ((rxr->bytes) && (rxr->packets))
952 newitr = max(newitr,
953 (rxr->bytes / rxr->packets));
954 newitr += 24; /* account for hardware frame, crc */
955
956 /* set an upper boundary */
957 newitr = min(newitr, 3000);
958
959 /* Be nice to the mid range */
960 if ((newitr > 300) && (newitr < 1200))
961 newitr = (newitr / 3);
962 else
963 newitr = (newitr / 2);
964
965 newitr |= newitr << 16;
966
967 /* save for next interrupt */
968 que->eitr_setting = newitr;
969
970 /* Reset state */
971 txr->bytes = 0;
972 txr->packets = 0;
973 rxr->bytes = 0;
974 rxr->packets = 0;
975
976 no_calc:
977 if (more)
978 softint_schedule(que->que_si);
979 else /* Reenable this interrupt */
980 ixv_enable_queue(adapter, que->msix);
981 return 1;
982 }
983
984 static int
985 ixv_msix_mbx(void *arg)
986 {
987 struct adapter *adapter = arg;
988 struct ixgbe_hw *hw = &adapter->hw;
989 u32 reg;
990
991 ++adapter->link_irq.ev_count;
992
993 /* First get the cause */
994 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
995 /* Clear interrupt with write */
996 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
997
998 /* Link status change */
999 if (reg & IXGBE_EICR_LSC)
1000 softint_schedule(adapter->link_si);
1001
1002 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1003 return 1;
1004 }
1005
1006 /*********************************************************************
1007 *
1008 * Media Ioctl callback
1009 *
1010 * This routine is called whenever the user queries the status of
1011 * the interface using ifconfig.
1012 *
1013 **********************************************************************/
1014 static void
1015 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1016 {
1017 struct adapter *adapter = ifp->if_softc;
1018
1019 INIT_DEBUGOUT("ixv_media_status: begin");
1020 IXGBE_CORE_LOCK(adapter);
1021 ixv_update_link_status(adapter);
1022
1023 ifmr->ifm_status = IFM_AVALID;
1024 ifmr->ifm_active = IFM_ETHER;
1025
1026 if (!adapter->link_active) {
1027 ifmr->ifm_active |= IFM_NONE;
1028 IXGBE_CORE_UNLOCK(adapter);
1029 return;
1030 }
1031
1032 ifmr->ifm_status |= IFM_ACTIVE;
1033
1034 switch (adapter->link_speed) {
1035 case IXGBE_LINK_SPEED_10GB_FULL:
1036 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1037 break;
1038 case IXGBE_LINK_SPEED_1GB_FULL:
1039 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1040 break;
1041 case IXGBE_LINK_SPEED_100_FULL:
1042 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1043 break;
1044 }
1045
1046 IXGBE_CORE_UNLOCK(adapter);
1047
1048 return;
1049 }
1050
1051 /*********************************************************************
1052 *
1053 * Media Ioctl callback
1054 *
1055 * This routine is called when the user changes speed/duplex using
1056 * media/mediaopt option with ifconfig.
1057 *
1058 **********************************************************************/
1059 static int
1060 ixv_media_change(struct ifnet * ifp)
1061 {
1062 struct adapter *adapter = ifp->if_softc;
1063 struct ifmedia *ifm = &adapter->media;
1064
1065 INIT_DEBUGOUT("ixv_media_change: begin");
1066
1067 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1068 return (EINVAL);
1069
1070 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1071 case IFM_AUTO:
1072 break;
1073 default:
1074 device_printf(adapter->dev, "Only auto media type\n");
1075 return (EINVAL);
1076 }
1077
1078 return (0);
1079 }
1080
1081
1082 /*********************************************************************
1083 * Multicast Update
1084 *
1085 * This routine is called whenever the multicast address list is updated.
1086 *
1087 **********************************************************************/
1088 #define IXGBE_RAR_ENTRIES 16
1089
1090 static void
1091 ixv_set_multi(struct adapter *adapter)
1092 {
1093 struct ether_multi *enm;
1094 struct ether_multistep step;
1095 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1096 u8 *update_ptr;
1097 int mcnt = 0;
1098 struct ethercom *ec = &adapter->osdep.ec;
1099
1100 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1101
1102 ETHER_FIRST_MULTI(step, ec, enm);
1103 while (enm != NULL) {
1104 bcopy(enm->enm_addrlo,
1105 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1106 IXGBE_ETH_LENGTH_OF_ADDRESS);
1107 mcnt++;
1108 /* XXX This might be required --msaitoh */
1109 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1110 break;
1111 ETHER_NEXT_MULTI(step, enm);
1112 }
1113
1114 update_ptr = mta;
1115
1116 ixgbe_update_mc_addr_list(&adapter->hw,
1117 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1118
1119 return;
1120 }
1121
1122 /*
1123 * This is an iterator function needed by the multicast
1124 * shared code. It simply feeds the shared code routine the
1125 * addresses from the array built in ixv_set_multi(), one by one.
1126 */
1127 static u8 *
1128 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1129 {
1130 u8 *addr = *update_ptr;
1131 u8 *newptr;
1132 *vmdq = 0;
1133
1134 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1135 *update_ptr = newptr;
1136 return addr;
1137 }
1138
1139 /*********************************************************************
1140 * Timer routine
1141 *
1142 * This routine checks for link status, updates statistics,
1143 * and runs the watchdog check.
1144 *
1145 **********************************************************************/
1146
1147 static void
1148 ixv_local_timer(void *arg)
1149 {
1150 struct adapter *adapter = arg;
1151
1152 IXGBE_CORE_LOCK(adapter);
1153 ixv_local_timer_locked(adapter);
1154 IXGBE_CORE_UNLOCK(adapter);
1155 }
1156
1157 static void
1158 ixv_local_timer_locked(void *arg)
1159 {
1160 struct adapter *adapter = arg;
1161 device_t dev = adapter->dev;
1162 struct ix_queue *que = adapter->queues;
1163 u64 queues = 0;
1164 int hung = 0;
1165
1166 KASSERT(mutex_owned(&adapter->core_mtx));
1167
1168 ixv_update_link_status(adapter);
1169
1170 /* Stats Update */
1171 ixv_update_stats(adapter);
1172
1173 /*
1174 ** Check the TX queues status
1175 ** - mark hung queues so we don't schedule on them
1176 ** - watchdog only if all queues show hung
1177 */
1178 for (int i = 0; i < adapter->num_queues; i++, que++) {
1179 /* Keep track of queues with work for soft irq */
1180 if (que->txr->busy)
1181 queues |= ((u64)1 << que->me);
1182 /*
1183 ** Each time txeof runs without cleaning while there
1184 ** are uncleaned descriptors, it increments busy. If
1185 ** we get to the MAX we declare it hung.
1186 */
1187 if (que->busy == IXGBE_QUEUE_HUNG) {
1188 ++hung;
1189 /* Mark the queue as inactive */
1190 adapter->active_queues &= ~((u64)1 << que->me);
1191 continue;
1192 } else {
1193 /* Check if we've come back from hung */
1194 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1195 adapter->active_queues |= ((u64)1 << que->me);
1196 }
1197 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1198 device_printf(dev,"Warning queue %d "
1199 "appears to be hung!\n", i);
1200 que->txr->busy = IXGBE_QUEUE_HUNG;
1201 ++hung;
1202 }
1203
1204 }
1205
1206 /* Only truly watchdog if all queues show hung */
1207 if (hung == adapter->num_queues)
1208 goto watchdog;
1209 else if (queues != 0) { /* Force an IRQ on queues with work */
1210 ixv_rearm_queues(adapter, queues);
1211 }
1212
1213 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1214 return;
1215
1216 watchdog:
1217 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1218 adapter->ifp->if_flags &= ~IFF_RUNNING;
1219 adapter->watchdog_events.ev_count++;
1220 ixv_init_locked(adapter);
1221 }
1222
1223 /*
1224 ** Note: this routine updates the OS on the link state;
1225 ** the real check of the hardware only happens with
1226 ** a link interrupt.
1227 */
1228 static void
1229 ixv_update_link_status(struct adapter *adapter)
1230 {
1231 struct ifnet *ifp = adapter->ifp;
1232 device_t dev = adapter->dev;
1233
1234 if (adapter->link_up){
1235 if (adapter->link_active == FALSE) {
1236 if (bootverbose) {
1237 const char *bpsmsg;
1238
1239 switch (adapter->link_speed) {
1240 case IXGBE_LINK_SPEED_10GB_FULL:
1241 bpsmsg = "10 Gbps";
1242 break;
1243 case IXGBE_LINK_SPEED_1GB_FULL:
1244 bpsmsg = "1 Gbps";
1245 break;
1246 case IXGBE_LINK_SPEED_100_FULL:
1247 bpsmsg = "100 Mbps";
1248 break;
1249 default:
1250 bpsmsg = "unknown speed";
1251 break;
1252 }
1253 device_printf(dev,"Link is up %s %s \n",
1254 bpsmsg, "Full Duplex");
1255 }
1256 adapter->link_active = TRUE;
1257 if_link_state_change(ifp, LINK_STATE_UP);
1258 }
1259 } else { /* Link down */
1260 if (adapter->link_active == TRUE) {
1261 if (bootverbose)
1262 device_printf(dev,"Link is Down\n");
1263 if_link_state_change(ifp, LINK_STATE_DOWN);
1264 adapter->link_active = FALSE;
1265 }
1266 }
1267
1268 return;
1269 }
1270
1271
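/*
 * ixv_ifstop - if_stop callback: stop the adapter under the core lock.
 */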
1272 static void
1273 ixv_ifstop(struct ifnet *ifp, int disable)
1274 {
1275 struct adapter *adapter = ifp->if_softc;
1276
1277 IXGBE_CORE_LOCK(adapter);
1278 ixv_stop(adapter);
1279 IXGBE_CORE_UNLOCK(adapter);
1280 }
1281
1282 /*********************************************************************
1283 *
1284 * This routine disables all traffic on the adapter by issuing a
1285 * global reset on the MAC.
1286 *
1287 **********************************************************************/
1288
1289 static void
1290 ixv_stop(void *arg)
1291 {
1292 struct ifnet *ifp;
1293 struct adapter *adapter = arg;
1294 struct ixgbe_hw *hw = &adapter->hw;
1295 ifp = adapter->ifp;
1296
1297 KASSERT(mutex_owned(&adapter->core_mtx));
1298
1299 INIT_DEBUGOUT("ixv_stop: begin\n");
1300 ixv_disable_intr(adapter);
1301
1302 /* Tell the stack that the interface is no longer active */
1303 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1304
1305 ixgbe_reset_hw(hw);
1306 adapter->hw.adapter_stopped = FALSE;
1307 ixgbe_stop_adapter(hw);
1308 callout_stop(&adapter->timer);
1309
1310 /* reprogram the RAR[0] in case user changed it. */
1311 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1312
1313 return;
1314 }
1315
1316
1317 /*********************************************************************
1318 *
1319 * Determine hardware revision.
1320 *
1321 **********************************************************************/
1322 static void
1323 ixv_identify_hardware(struct adapter *adapter)
1324 {
1325 pcitag_t tag;
1326 pci_chipset_tag_t pc;
1327 pcireg_t subid, id;
1328 struct ixgbe_hw *hw = &adapter->hw;
1329
1330 pc = adapter->osdep.pc;
1331 tag = adapter->osdep.tag;
1332
1333 /*
1334 ** Make sure BUSMASTER is set; on a VM under
1335 ** KVM it may not be, and that will break things.
1336 */
1337 ixgbe_pci_enable_busmaster(pc, tag);
1338
1339 id = pci_conf_read(pc, tag, PCI_ID_REG);
1340 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1341
1342 /* Save off the information about this board */
1343 hw->vendor_id = PCI_VENDOR(id);
1344 hw->device_id = PCI_PRODUCT(id);
1345 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1346 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1347 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1348
1349 /* We need this to determine device-specific things */
1350 ixgbe_set_mac_type(hw);
1351
1352 /* Set the right number of segments */
1353 adapter->num_segs = IXGBE_82599_SCATTER;
1354
1355 return;
1356 }
1357
1358 /*********************************************************************
1359 *
1360 * Setup MSIX Interrupt resources and handlers
1361 *
1362 **********************************************************************/
1363 static int
1364 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1365 {
1366 device_t dev = adapter->dev;
1367 struct ix_queue *que = adapter->queues;
1368 struct tx_ring *txr = adapter->tx_rings;
1369 int error, rid, vector = 0;
1370 pci_chipset_tag_t pc;
1371 pcitag_t tag;
1372 char intrbuf[PCI_INTRSTR_LEN];
1373 char intr_xname[32];
1374 const char *intrstr = NULL;
1375 kcpuset_t *affinity;
1376 int cpu_id = 0;
1377
1378 pc = adapter->osdep.pc;
1379 tag = adapter->osdep.tag;
1380
1381 adapter->osdep.nintrs = adapter->num_queues + 1;
1382 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
1383 adapter->osdep.nintrs) != 0) {
1384 aprint_error_dev(dev,
1385 "failed to allocate MSI-X interrupt\n");
1386 return (ENXIO);
1387 }
1388
1389 kcpuset_create(&affinity, false);
1390 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1391 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
1392 device_xname(dev), i);
1393 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1394 sizeof(intrbuf));
1395 #ifdef IXV_MPSAFE
1396 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1397 true);
1398 #endif
1399 /* Set the handler function */
1400 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
1401 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
1402 intr_xname);
1403 if (que->res == NULL) {
1404 pci_intr_release(pc, adapter->osdep.intrs,
1405 adapter->osdep.nintrs);
1406 aprint_error_dev(dev,
1407 "Failed to register QUE handler\n");
1408 kcpuset_destroy(affinity);
1409 return (ENXIO);
1410 }
1411 que->msix = vector;
1412 adapter->active_queues |= (u64)(1 << que->msix);
1413
1414 cpu_id = i;
1415 /* Round-robin affinity */
1416 kcpuset_zero(affinity);
1417 kcpuset_set(affinity, cpu_id % ncpu);
1418 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1419 NULL);
1420 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1421 intrstr);
1422 if (error == 0)
1423 aprint_normal(", bound queue %d to cpu %d\n",
1424 i, cpu_id % ncpu);
1425 else
1426 aprint_normal("\n");
1427
1428 #ifndef IXGBE_LEGACY_TX
1429 txr->txr_si = softint_establish(SOFTINT_NET,
1430 ixgbe_deferred_mq_start, txr);
1431 #endif
1432 que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1433 que);
1434 if (que->que_si == NULL) {
1435 aprint_error_dev(dev,
1436 "could not establish software interrupt\n");
1437 }
1438 }
1439
1440 /* and Mailbox */
1441 cpu_id++;
1442 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
1443 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1444 sizeof(intrbuf));
1445 #ifdef IXV_MPSAFE
1446 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
1447 true);
1448 #endif
1449 /* Set the mbx handler function */
1450 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
1451 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
1452 intr_xname);
1453 if (adapter->osdep.ihs[vector] == NULL) {
1454 adapter->res = NULL;
1455 aprint_error_dev(dev, "Failed to register LINK handler\n");
1456 kcpuset_destroy(affinity);
1457 return (ENXIO);
1458 }
1459 /* Round-robin affinity */
1460 kcpuset_zero(affinity);
1461 kcpuset_set(affinity, cpu_id % ncpu);
1462 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1463
1464 aprint_normal_dev(dev,
1465 "for link, interrupting at %s", intrstr);
1466 if (error == 0)
1467 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
1468 else
1469 aprint_normal("\n");
1470
1471 adapter->vector = vector;
1472 /* Tasklets for Mailbox */
1473 adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1474 adapter);
1475 /*
1476 ** Due to a broken design QEMU will fail to properly
1477 ** enable the guest for MSIX unless the vectors in
1478 ** the table are all set up, so we must rewrite the
1479 ** ENABLE in the MSIX control register again at this
1480 ** point to cause it to successfully initialize us.
1481 */
1482 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1483 int msix_ctrl;
1484 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1485 rid += PCI_MSIX_CTL;
1486 msix_ctrl = pci_conf_read(pc, tag, rid);
1487 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1488 pci_conf_write(pc, tag, rid, msix_ctrl);
1489 }
1490
1491 kcpuset_destroy(affinity);
1492 return (0);
1493 }
1494
1495 /*
1496 * Setup MSIX resources; note that the VF
1497 * device MUST use MSIX; there is no fallback.
1498 */
1499 static int
1500 ixv_setup_msix(struct adapter *adapter)
1501 {
1502 device_t dev = adapter->dev;
1503 int want, queues, msgs;
1504
1505 /* Must have at least 2 MSIX vectors */
1506 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1507 if (msgs < 2) {
1508 aprint_error_dev(dev,"MSIX config error\n");
1509 return (ENXIO);
1510 }
1511 msgs = MIN(msgs, IXG_MAX_NINTR);
1512
1513 /* Figure out a reasonable auto config value */
1514 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
1515
1516 if (ixv_num_queues != 0)
1517 queues = ixv_num_queues;
1518 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
1519 queues = IXGBE_VF_MAX_TX_QUEUES;
1520
1521 /*
1522 ** Want vectors for the queues,
1523 ** plus an additional for mailbox.
1524 */
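	/* e.g. two queues -> three vectors: one per TX/RX pair + mailbox */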
1525 want = queues + 1;
1526 if (msgs >= want)
1527 msgs = want;
1528 else {
1529 aprint_error_dev(dev,
1530 "MSIX Configuration Problem, "
1531 "%d vectors but %d queues wanted!\n",
1532 msgs, want);
1533 return (ENXIO);
1534 }
1535
1536 adapter->msix_mem = (void *)1; /* XXX */
1537 aprint_normal_dev(dev,
1538 "Using MSIX interrupts with %d vectors\n", msgs);
1539 adapter->num_queues = queues;
1540 return (msgs);
1541 }
1542
1543
1544 static int
1545 ixv_allocate_pci_resources(struct adapter *adapter,
1546 const struct pci_attach_args *pa)
1547 {
1548 pcireg_t memtype;
1549 device_t dev = adapter->dev;
1550 bus_addr_t addr;
1551 int flags;
1552
1553 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1554 switch (memtype) {
1555 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1556 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1557 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1558 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1559 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1560 goto map_err;
1561 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1562 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1563 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1564 }
1565 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1566 adapter->osdep.mem_size, flags,
1567 &adapter->osdep.mem_bus_space_handle) != 0) {
1568 map_err:
1569 adapter->osdep.mem_size = 0;
1570 aprint_error_dev(dev, "unable to map BAR0\n");
1571 return ENXIO;
1572 }
1573 break;
1574 default:
1575 aprint_error_dev(dev, "unexpected type on BAR0\n");
1576 return ENXIO;
1577 }
1578 adapter->hw.back = adapter;
1579
1580 /* Pick up the tuneable queues */
1581 adapter->num_queues = ixv_num_queues;
1582
1583 /*
1584 ** Now set up MSI/X, which should
1585 ** return us the number of
1586 ** configured vectors.
1587 */
1588 adapter->msix = ixv_setup_msix(adapter);
1589 if (adapter->msix == ENXIO)
1590 return (ENXIO);
1591 else
1592 return (0);
1593 }
1594
1595 static void
1596 ixv_free_pci_resources(struct adapter * adapter)
1597 {
1598 struct ix_queue *que = adapter->queues;
1599 int rid;
1600
1601 /*
1602 ** Release all msix queue resources:
1603 */
1604 for (int i = 0; i < adapter->num_queues; i++, que++) {
1605 if (que->res != NULL)
1606 pci_intr_disestablish(adapter->osdep.pc,
1607 adapter->osdep.ihs[i]);
1608 }
1609
1610
1611 /* Clean the Legacy or Link interrupt last */
1612 if (adapter->vector) /* we are doing MSIX */
1613 rid = adapter->vector;
1614 else
1615 rid = 0;
1616
1617 if (adapter->osdep.ihs[rid] != NULL) {
1618 pci_intr_disestablish(adapter->osdep.pc,
1619 adapter->osdep.ihs[rid]);
1620 adapter->osdep.ihs[rid] = NULL;
1621 }
1622
1623 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1624 adapter->osdep.nintrs);
1625
1626 if (adapter->osdep.mem_size != 0) {
1627 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1628 adapter->osdep.mem_bus_space_handle,
1629 adapter->osdep.mem_size);
1630 }
1631
1632 return;
1633 }
1634
1635 /*********************************************************************
1636 *
1637 * Setup networking device structure and register an interface.
1638 *
1639 **********************************************************************/
1640 static void
1641 ixv_setup_interface(device_t dev, struct adapter *adapter)
1642 {
1643 struct ethercom *ec = &adapter->osdep.ec;
1644 struct ifnet *ifp;
1645
1646 INIT_DEBUGOUT("ixv_setup_interface: begin");
1647
1648 ifp = adapter->ifp = &ec->ec_if;
1649 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1650 ifp->if_baudrate = IF_Gbps(10);
1651 ifp->if_init = ixv_init;
1652 ifp->if_stop = ixv_ifstop;
1653 ifp->if_softc = adapter;
1654 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1655 ifp->if_ioctl = ixv_ioctl;
1656 #ifndef IXGBE_LEGACY_TX
1657 ifp->if_transmit = ixgbe_mq_start;
1658 #endif
1659 ifp->if_start = ixgbe_start;
1660 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1661 IFQ_SET_READY(&ifp->if_snd);
1662
1663 if_initialize(ifp);
1664 ether_ifattach(ifp, adapter->hw.mac.addr);
1665 #ifndef IXGBE_LEGACY_TX
1666 #if 0 /* We use per TX queue softint */
1667 if_deferred_start_init(ifp, ixgbe_deferred_mq_start);
1668 #endif
1669 #endif
1670 if_register(ifp);
1671 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1672
1673 adapter->max_frame_size =
1674 ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
1675
1676 /*
1677 * Tell the upper layer(s) we support long frames.
1678 */
1679 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1680
1681 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
1682 ifp->if_capenable = 0;
1683
1684 ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
1685 ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
1686 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1687 | ETHERCAP_VLAN_MTU;
1688 ec->ec_capenable = ec->ec_capabilities;
1689
1690 /* Don't enable LRO by default */
1691 ifp->if_capabilities |= IFCAP_LRO;
1692 #if 0
1693 ifp->if_capenable = ifp->if_capabilities;
1694 #endif
1695
1696 /*
1697 ** Don't turn this on by default: if vlans are
1698 ** created on another pseudo device (e.g. lagg)
1699 ** then vlan events are not passed through, breaking
1700 ** operation, but with HW FILTER off it works. If
1701 ** you use vlans directly on this driver you can
1702 ** enable this and get full hardware tag filtering.
1703 */
1704 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1705
1706 /*
1707 * Specify the media types supported by this adapter and register
1708 * callbacks to update media and link information
1709 */
1710 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1711 ixv_media_status);
1712 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1713 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1714
1715 return;
1716 }
1717
1718 static void
1719 ixv_config_link(struct adapter *adapter)
1720 {
1721 struct ixgbe_hw *hw = &adapter->hw;
1722
1723 if (hw->mac.ops.check_link)
1724 hw->mac.ops.check_link(hw, &adapter->link_speed,
1725 &adapter->link_up, FALSE);
1726 }
1727
1728
1729 /*********************************************************************
1730 *
1731 * Enable transmit unit.
1732 *
1733 **********************************************************************/
1734 static void
1735 ixv_initialize_transmit_units(struct adapter *adapter)
1736 {
1737 struct tx_ring *txr = adapter->tx_rings;
1738 struct ixgbe_hw *hw = &adapter->hw;
1739
1740
1741 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1742 u64 tdba = txr->txdma.dma_paddr;
1743 u32 txctrl, txdctl;
1744
1745 /* Set WTHRESH to 8, burst writeback */
1746 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1747 txdctl |= (8 << 16);
1748 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1749
1750 /* Set the HW Tx Head and Tail indices */
1751 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1752 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1753
1754 /* Set Tx Tail register */
1755 txr->tail = IXGBE_VFTDT(i);
1756
1757 /* Set Ring parameters */
1758 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1759 (tdba & 0x00000000ffffffffULL));
1760 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1761 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1762 adapter->num_tx_desc *
1763 sizeof(struct ixgbe_legacy_tx_desc));
1764 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1765 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1766 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1767
1768 /* Now enable */
1769 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1770 txdctl |= IXGBE_TXDCTL_ENABLE;
1771 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1772 }
1773
1774 return;
1775 }
1776
1777
1778 /*********************************************************************
1779 *
1780 * Setup receive registers and features.
1781 *
1782 **********************************************************************/
1783 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1784
1785 static void
1786 ixv_initialize_receive_units(struct adapter *adapter)
1787 {
1788 struct rx_ring *rxr = adapter->rx_rings;
1789 struct ixgbe_hw *hw = &adapter->hw;
1790 struct ifnet *ifp = adapter->ifp;
1791 u32 bufsz, rxcsum, psrtype;
1792
1793 if (ifp->if_mtu > ETHERMTU)
1794 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1795 else
1796 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
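	/* SRRCTL expresses the packet buffer size in 1 KB units, hence the shift */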
1797
1798 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1799 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1800 IXGBE_PSRTYPE_L2HDR;
1801
1802 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1803
1804 /* Tell PF our max_frame size */
1805 ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
1806
1807 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1808 u64 rdba = rxr->rxdma.dma_paddr;
1809 u32 reg, rxdctl;
1810
1811 /* Disable the queue */
1812 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1813 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1814 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1815 for (int j = 0; j < 10; j++) {
1816 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1817 IXGBE_RXDCTL_ENABLE)
1818 msec_delay(1);
1819 else
1820 break;
1821 }
1822 wmb();
1823 /* Setup the Base and Length of the Rx Descriptor Ring */
1824 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1825 (rdba & 0x00000000ffffffffULL));
1826 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1827 (rdba >> 32));
1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1829 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1830
1831 /* Reset the ring indices */
1832 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1833 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1834
1835 /* Set up the SRRCTL register */
1836 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1837 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1838 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1839 reg |= bufsz;
1840 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1841 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1842
1843 /* Capture Rx Tail index */
1844 rxr->tail = IXGBE_VFRDT(rxr->me);
1845
1846 /* Do the queue enabling last */
1847 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1848 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1849 for (int k = 0; k < 10; k++) {
1850 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1851 IXGBE_RXDCTL_ENABLE)
1852 break;
1853 else
1854 msec_delay(1);
1855 }
1856 wmb();
1857
1858 /* Set the Tail Pointer */
1859 #ifdef DEV_NETMAP
1860 /*
1861 * In netmap mode, we must preserve the buffers made
1862 * available to userspace before the if_init()
1863 * (this is true by default on the TX side, because
1864 * init makes all buffers available to userspace).
1865 *
1866 * netmap_reset() and the device specific routines
1867 * (e.g. ixgbe_setup_receive_rings()) map these
1868 * buffers at the end of the NIC ring, so here we
1869 * must set the RDT (tail) register to make sure
1870 * they are not overwritten.
1871 *
1872 * In this driver the NIC ring starts at RDH = 0,
1873 * RDT points to the last slot available for reception (?),
1874 * so RDT = num_rx_desc - 1 means the whole ring is available.
1875 */
1876 if (ifp->if_capenable & IFCAP_NETMAP) {
1877 struct netmap_adapter *na = NA(adapter->ifp);
1878 struct netmap_kring *kring = &na->rx_rings[i];
1879 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1880
1881 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1882 } else
1883 #endif /* DEV_NETMAP */
1884 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1885 adapter->num_rx_desc - 1);
1886 }
1887
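	/*
	 * Checksum/RSS setup: when PCSD is set the descriptor write-back
	 * carries the RSS hash rather than the fragment checksum; if PCSD
	 * stays clear, enable the IP payload checksum (IPPCSE) instead.
	 */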
1888 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1889
1890 if (ifp->if_capenable & IFCAP_RXCSUM)
1891 rxcsum |= IXGBE_RXCSUM_PCSD;
1892
1893 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1894 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1895
1896 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1897
1898 return;
1899 }
1900
1901 static void
1902 ixv_setup_vlan_support(struct adapter *adapter)
1903 {
1904 struct ixgbe_hw *hw = &adapter->hw;
1905 u32 ctrl, vid, vfta, retry;
1906 struct rx_ring *rxr;
1907
1908 	/*
1909 	** We get here through init_locked, meaning
1910 	** a soft reset has already cleared the VFTA
1911 	** and other state, so if no VLANs have been
1912 	** registered there is nothing to do.
1913 	*/
1914 if (!VLAN_ATTACHED(&adapter->osdep.ec))
1915 return;
1916
1917 /* Enable the queues */
1918 for (int i = 0; i < adapter->num_queues; i++) {
1919 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1920 ctrl |= IXGBE_RXDCTL_VME;
1921 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1922 /*
1923 * Let Rx path know that it needs to store VLAN tag
1924 * as part of extra mbuf info.
1925 */
1926 rxr = &adapter->rx_rings[i];
1927 rxr->vtag_strip = TRUE;
1928 }
1929
1930 	/*
1931 	** A soft reset zeroes out the VFTA, so
1932 	** we need to repopulate it now.
1933 	*/
1934 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1935 if (ixv_shadow_vfta[i] == 0)
1936 continue;
1937 vfta = ixv_shadow_vfta[i];
1938 		/*
1939 		** Reconstruct the VLAN IDs
1940 		** from the bits set in each
1941 		** word of the shadow VFTA.
1942 		*/
1943 for (int j = 0; j < 32; j++) {
1944 retry = 0;
1945 if ((vfta & (1 << j)) == 0)
1946 continue;
1947 vid = (i * 32) + j;
1948 /* Call the shared code mailbox routine */
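			/*
			 * The mailbox request to the PF may not be
			 * acknowledged immediately; retry a few times
			 * before giving up on this VID.
			 */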
1949 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1950 if (++retry > 5)
1951 break;
1952 }
1953 }
1954 }
1955 }
1956
1957 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1958 /*
1959 ** This routine is run via a vlan config EVENT;
1960 ** it enables us to use the HW filter table since
1961 ** we can get the vlan id.  It only creates the
1962 ** entry in the soft version of the VFTA; init will
1963 ** repopulate the real table.
1964 */
1965 static void
1966 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1967 {
1968 struct adapter *adapter = ifp->if_softc;
1969 u16 index, bit;
1970
1971 if (ifp->if_softc != arg) /* Not our event */
1972 return;
1973
1974 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1975 return;
1976
1977 IXGBE_CORE_LOCK(adapter);
1978 index = (vtag >> 5) & 0x7F;
1979 bit = vtag & 0x1F;
1980 ixv_shadow_vfta[index] |= (1 << bit);
1981 /* Re-init to load the changes */
1982 ixv_init_locked(adapter);
1983 IXGBE_CORE_UNLOCK(adapter);
1984 }
1985
1986 /*
1987 ** This routine is run via a vlan
1988 ** unconfig EVENT; it removes our entry
1989 ** from the soft VFTA.
1990 */
1991 static void
1992 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1993 {
1994 struct adapter *adapter = ifp->if_softc;
1995 u16 index, bit;
1996
1997 if (ifp->if_softc != arg)
1998 return;
1999
2000 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2001 return;
2002
2003 IXGBE_CORE_LOCK(adapter);
2004 index = (vtag >> 5) & 0x7F;
2005 bit = vtag & 0x1F;
2006 ixv_shadow_vfta[index] &= ~(1 << bit);
2007 /* Re-init to load the changes */
2008 ixv_init_locked(adapter);
2009 IXGBE_CORE_UNLOCK(adapter);
2010 }
2011 #endif
2012
2013 static void
2014 ixv_enable_intr(struct adapter *adapter)
2015 {
2016 struct ixgbe_hw *hw = &adapter->hw;
2017 struct ix_queue *que = adapter->queues;
2018 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2019
2020
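	/*
	 * Unmask the non-queue causes first, set EIAC so the queue
	 * interrupts auto-clear, then unmask each queue vector
	 * individually via ixv_enable_queue().
	 */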
2021 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
2022
2023 mask = IXGBE_EIMS_ENABLE_MASK;
2024 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
2025 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2026
2027 for (int i = 0; i < adapter->num_queues; i++, que++)
2028 ixv_enable_queue(adapter, que->msix);
2029
2030 IXGBE_WRITE_FLUSH(hw);
2031
2032 return;
2033 }
2034
2035 static void
2036 ixv_disable_intr(struct adapter *adapter)
2037 {
2038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2039 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
2040 IXGBE_WRITE_FLUSH(&adapter->hw);
2041 return;
2042 }
2043
2044 /*
2045 ** Setup the correct IVAR register for a particular MSIX interrupt
2046 ** - entry is the register array entry
2047 ** - vector is the MSIX vector for this queue
2048 ** - type is RX/TX/MISC
2049 */
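/*
** Layout note (derived from the index math below): each VTIVAR
** register holds four 8-bit vector entries covering the RX and TX
** causes of two queues.  With type 0 = RX and 1 = TX,
** index = 16 * (entry & 1) + 8 * type picks the byte within
** VTIVAR(entry >> 1); e.g. the TX cause of queue 3 lands in bits
** 31:24 of VTIVAR(1).
*/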
2050 static void
2051 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2052 {
2053 struct ixgbe_hw *hw = &adapter->hw;
2054 u32 ivar, index;
2055
2056 vector |= IXGBE_IVAR_ALLOC_VAL;
2057
2058 if (type == -1) { /* MISC IVAR */
2059 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2060 ivar &= ~0xFF;
2061 ivar |= vector;
2062 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2063 } else { /* RX/TX IVARS */
2064 index = (16 * (entry & 1)) + (8 * type);
2065 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2066 ivar &= ~(0xFF << index);
2067 ivar |= (vector << index);
2068 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2069 }
2070 }
2071
2072 static void
2073 ixv_configure_ivars(struct adapter *adapter)
2074 {
2075 struct ix_queue *que = adapter->queues;
2076
2077 for (int i = 0; i < adapter->num_queues; i++, que++) {
2078 /* First the RX queue entry */
2079 ixv_set_ivar(adapter, i, que->msix, 0);
2080 /* ... and the TX */
2081 ixv_set_ivar(adapter, i, que->msix, 1);
2082 /* Set an initial value in EITR */
2083 IXGBE_WRITE_REG(&adapter->hw,
2084 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
2085 }
2086
2087 /* For the mailbox interrupt */
2088 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2089 }
2090
2091
2092 /*
2093 ** Tasklet handler for MSIX mailbox (MBX) interrupts
2094 **  - runs outside interrupt context since the mailbox code may sleep
2095 */
2096 static void
2097 ixv_handle_mbx(void *context)
2098 {
2099 struct adapter *adapter = context;
2100
2101 ixgbe_check_link(&adapter->hw,
2102 &adapter->link_speed, &adapter->link_up, 0);
2103 ixv_update_link_status(adapter);
2104 }
2105
2106 /*
2107 ** The VF stats registers never have a truly virgin
2108 ** starting point, so this routine creates an
2109 ** artificial one, marking ground zero at attach
2110 ** time, as it were.
2111 */
2112 static void
2113 ixv_save_stats(struct adapter *adapter)
2114 {
2115 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2116
2117 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2118 stats->saved_reset_vfgprc +=
2119 stats->vfgprc.ev_count - stats->base_vfgprc;
2120 stats->saved_reset_vfgptc +=
2121 stats->vfgptc.ev_count - stats->base_vfgptc;
2122 stats->saved_reset_vfgorc +=
2123 stats->vfgorc.ev_count - stats->base_vfgorc;
2124 stats->saved_reset_vfgotc +=
2125 stats->vfgotc.ev_count - stats->base_vfgotc;
2126 stats->saved_reset_vfmprc +=
2127 stats->vfmprc.ev_count - stats->base_vfmprc;
2128 }
2129 }
2130
2131 static void
2132 ixv_init_stats(struct adapter *adapter)
2133 {
2134 struct ixgbe_hw *hw = &adapter->hw;
2135
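	/*
	 * The octet counters (GORC/GOTC) are wider than 32 bits and are
	 * read as an LSB/MSB register pair; combine them into a single
	 * 64-bit baseline value here.
	 */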
2136 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2137 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2138 adapter->stats.vf.last_vfgorc |=
2139 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2140
2141 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2142 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2143 adapter->stats.vf.last_vfgotc |=
2144 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2145
2146 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2147
2148 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2149 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2150 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2151 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2152 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2153 }
2154
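/*
** The VF hardware counters are 32 bits wide (36 bits for the octet
** counters, read as an LSB/MSB pair) and wrap silently.  These macros
** detect a wrap (current < last), add the appropriate carry into the
** 64-bit event counter, and then splice the current hardware value
** into its low bits.  For example, if a 32-bit counter last read
** 0xFFFFFFF0 and now reads 0x10, ev_count gains a 2^32 carry and its
** low 32 bits become 0x10.
*/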
2155 #define UPDATE_STAT_32(reg, last, count) \
2156 { \
2157 u32 current = IXGBE_READ_REG(hw, reg); \
2158 if (current < last) \
2159 count.ev_count += 0x100000000LL; \
2160 last = current; \
2161 count.ev_count &= 0xFFFFFFFF00000000LL; \
2162 count.ev_count |= current; \
2163 }
2164
2165 #define UPDATE_STAT_36(lsb, msb, last, count) \
2166 { \
2167 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2168 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2169 u64 current = ((cur_msb << 32) | cur_lsb); \
2170 if (current < last) \
2171 count.ev_count += 0x1000000000LL; \
2172 last = current; \
2173 count.ev_count &= 0xFFFFFFF000000000LL; \
2174 count.ev_count |= current; \
2175 }
2176
2177 /*
2178 ** ixv_update_stats - Update the board statistics counters.
2179 */
2180 void
2181 ixv_update_stats(struct adapter *adapter)
2182 {
2183 struct ixgbe_hw *hw = &adapter->hw;
2184
2185 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2186 adapter->stats.vf.vfgprc);
2187 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2188 adapter->stats.vf.vfgptc);
2189 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2190 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2191 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2192 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2193 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2194 adapter->stats.vf.vfmprc);
2195 }
2196
2197 /*
2198 * Add statistic sysctls for the VF.
2199 */
2200 static void
2201 ixv_add_stats_sysctls(struct adapter *adapter)
2202 {
2203 device_t dev = adapter->dev;
2204 struct ix_queue *que = &adapter->queues[0];
2205 struct tx_ring *txr = que->txr;
2206 struct rx_ring *rxr = que->rxr;
2207
2208 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2209
2210 const char *xname = device_xname(dev);
2211
2212 /* Driver Statistics */
2213 evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
2214 NULL, xname, "Driver dropped packets");
2215 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2216 NULL, xname, "m_defrag() failed");
2217 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2218 NULL, xname, "Watchdog timeouts");
2219
2220 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2221 xname, "Good Packets Received");
2222 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2223 xname, "Good Octets Received");
2224 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2225 xname, "Multicast Packets Received");
2226 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2227 xname, "Good Packets Transmitted");
2228 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2229 xname, "Good Octets Transmitted");
2230 evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
2231 xname, "IRQs on queue");
2232 evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
2233 xname, "RX irqs on queue");
2234 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
2235 xname, "RX packets");
2236 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
2237 xname, "RX bytes");
2238 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
2239 xname, "Discarded RX packets");
2240 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
2241 xname, "TX Packets");
2242 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
2243 xname, "# of times not enough descriptors were available during TX");
2244 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
2245 xname, "TX TSO");
2246 }
2247
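/*
** Helper: create a read/write integer sysctl leaf under the per-device
** node and seed the backing variable with a default value.
*/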
2248 static void
2249 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2250 const char *description, int *limit, int value)
2251 {
2252 device_t dev = adapter->dev;
2253 struct sysctllog **log;
2254 const struct sysctlnode *rnode, *cnode;
2255
2256 log = &adapter->sysctllog;
2257 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2258 aprint_error_dev(dev, "could not create sysctl root\n");
2259 return;
2260 }
2261 if (sysctl_createv(log, 0, &rnode, &cnode,
2262 CTLFLAG_READWRITE, CTLTYPE_INT,
2263 name, SYSCTL_DESCR(description),
2264 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2265 aprint_error_dev(dev, "could not create sysctl\n");
2266 *limit = value;
2267 }
2268
2269 /**********************************************************************
2270  *
2271  *  This routine is called via the debug sysctl handler
2272  *  (ixv_sysctl_debug).  It provides a way to take a look at important
2273  *  statistics maintained by the driver and hardware.
2274  *
2275  **********************************************************************/
2276 static void
2277 ixv_print_debug_info(struct adapter *adapter)
2278 {
2279 device_t dev = adapter->dev;
2280 struct ixgbe_hw *hw = &adapter->hw;
2281 struct ix_queue *que = adapter->queues;
2282 struct rx_ring *rxr;
2283 struct tx_ring *txr;
2284 #ifdef LRO
2285 struct lro_ctrl *lro;
2286 #endif /* LRO */
2287
2288 device_printf(dev,"Error Byte Count = %u \n",
2289 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2290
2291 for (int i = 0; i < adapter->num_queues; i++, que++) {
2292 txr = que->txr;
2293 rxr = que->rxr;
2294 #ifdef LRO
2295 lro = &rxr->lro;
2296 #endif /* LRO */
2297 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2298 que->msix, (long)que->irqs.ev_count);
2299 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2300 rxr->me, (long long)rxr->rx_packets.ev_count);
2301 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2302 rxr->me, (long)rxr->rx_bytes.ev_count);
2303 #ifdef LRO
2304 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
2305 rxr->me, (long long)lro->lro_queued);
2306 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
2307 rxr->me, (long long)lro->lro_flushed);
2308 #endif /* LRO */
2309 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2310 txr->me, (long)txr->total_packets.ev_count);
2311 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2312 txr->me, (long)txr->no_desc_avail.ev_count);
2313 }
2314
2315 device_printf(dev,"MBX IRQ Handled: %lu\n",
2316 (long)adapter->link_irq.ev_count);
2317 return;
2318 }
2319
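/*
** Sysctl handler: writing 1 to the node dumps the statistics above
** via ixv_print_debug_info().
*/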
2320 static int
2321 ixv_sysctl_debug(SYSCTLFN_ARGS)
2322 {
2323 struct sysctlnode node;
2324 int error, result;
2325 struct adapter *adapter;
2326
2327 node = *rnode;
2328 adapter = (struct adapter *)node.sysctl_data;
2329 node.sysctl_data = &result;
2330 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2331
2332 if (error)
2333 return error;
2334
2335 if (result == 1)
2336 ixv_print_debug_info(adapter);
2337
2338 return 0;
2339 }
2340
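/*
** Create the per-device sysctl root node under hw, named after the
** device, which anchors the driver's other sysctl leaves.
*/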
2341 const struct sysctlnode *
2342 ixv_sysctl_instance(struct adapter *adapter)
2343 {
2344 const char *dvname;
2345 struct sysctllog **log;
2346 int rc;
2347 const struct sysctlnode *rnode;
2348
2349 log = &adapter->sysctllog;
2350 dvname = device_xname(adapter->dev);
2351
2352 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2353 0, CTLTYPE_NODE, dvname,
2354 SYSCTL_DESCR("ixv information and settings"),
2355 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2356 goto err;
2357
2358 return rnode;
2359 err:
2360 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2361 return NULL;
2362 }
2363