1 /******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/
34 /*$NetBSD: ixv.c,v 1.29 2016/12/05 08:50:29 msaitoh Exp $*/
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "vlan.h"
41
42 /*********************************************************************
43 * Driver version
44 *********************************************************************/
45 char ixv_driver_version[] = "1.4.6-k";
46
47 /*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
58 {
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 /* required last entry */
64 {0, 0, 0, 0, 0}
65 };
66
67 /*********************************************************************
68 * Table of branding strings
69 *********************************************************************/
70
71 static const char *ixv_strings[] = {
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 };
74
75 /*********************************************************************
76 * Function prototypes
77 *********************************************************************/
78 static int ixv_probe(device_t, cfdata_t, void *);
79 static void ixv_attach(device_t, device_t, void *);
80 static int ixv_detach(device_t, int);
81 #if 0
82 static int ixv_shutdown(device_t);
83 #endif
84 static int ixv_ioctl(struct ifnet *, u_long, void *);
85 static int ixv_init(struct ifnet *);
86 static void ixv_init_locked(struct adapter *);
87 static void ixv_stop(void *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
90 static void ixv_identify_hardware(struct adapter *);
91 static int ixv_allocate_pci_resources(struct adapter *,
92 const struct pci_attach_args *);
93 static int ixv_allocate_msix(struct adapter *,
94 const struct pci_attach_args *);
95 static int ixv_setup_msix(struct adapter *);
96 static void ixv_free_pci_resources(struct adapter *);
97 static void ixv_local_timer(void *);
98 static void ixv_local_timer_locked(void *);
99 static void ixv_setup_interface(device_t, struct adapter *);
100 static void ixv_config_link(struct adapter *);
101
102 static void ixv_initialize_transmit_units(struct adapter *);
103 static void ixv_initialize_receive_units(struct adapter *);
104
105 static void ixv_enable_intr(struct adapter *);
106 static void ixv_disable_intr(struct adapter *);
107 static void ixv_set_multi(struct adapter *);
108 static void ixv_update_link_status(struct adapter *);
109 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
110 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
111 static void ixv_configure_ivars(struct adapter *);
112 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
113
114 static void ixv_setup_vlan_support(struct adapter *);
115 #if 0
116 static void ixv_register_vlan(void *, struct ifnet *, u16);
117 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
118 #endif
119
120 static void ixv_save_stats(struct adapter *);
121 static void ixv_init_stats(struct adapter *);
122 static void ixv_update_stats(struct adapter *);
123 static void ixv_add_stats_sysctls(struct adapter *);
124 static void ixv_set_sysctl_value(struct adapter *, const char *,
125 const char *, int *, int);
126
127 /* The MSI/X Interrupt handlers */
128 static int ixv_msix_que(void *);
129 static int ixv_msix_mbx(void *);
130
131 /* Deferred interrupt tasklets */
132 static void ixv_handle_que(void *);
133 static void ixv_handle_mbx(void *);
134
135 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
136 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
137
138 #ifdef DEV_NETMAP
139 /*
140 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
141 * if_ix.c.
142 */
143 extern void ixgbe_netmap_attach(struct adapter *adapter);
144
145 #include <net/netmap.h>
146 #include <sys/selinfo.h>
147 #include <dev/netmap/netmap_kern.h>
148 #endif /* DEV_NETMAP */
149
150 /*********************************************************************
151 * FreeBSD Device Interface Entry Points
152 *********************************************************************/
153
154 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
155 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
156 DVF_DETACH_SHUTDOWN);
157
158 # if 0
159 static device_method_t ixv_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, ixv_probe),
162 DEVMETHOD(device_attach, ixv_attach),
163 DEVMETHOD(device_detach, ixv_detach),
164 DEVMETHOD(device_shutdown, ixv_shutdown),
165 DEVMETHOD_END
166 };
167 #endif
168
169 #if 0
170 static driver_t ixv_driver = {
171 "ixv", ixv_methods, sizeof(struct adapter),
172 };
173
174 devclass_t ixv_devclass;
175 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
176 MODULE_DEPEND(ixv, pci, 1, 1, 1);
177 MODULE_DEPEND(ixv, ether, 1, 1, 1);
178 #ifdef DEV_NETMAP
179 MODULE_DEPEND(ix, netmap, 1, 1, 1);
180 #endif /* DEV_NETMAP */
181 /* XXX depend on 'ix' ? */
182 #endif
183
184 /*
185 ** TUNEABLE PARAMETERS:
186 */
187
188 /* Number of Queues - do not exceed MSIX vectors - 1 */
189 static int ixv_num_queues = 1;
190 #define TUNABLE_INT(__x, __y)
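/*
** Note: TUNABLE_INT() is FreeBSD's loader-tunable hook; the empty
** definition above makes the calls below expand to nothing on NetBSD,
** so the compile-time defaults are what take effect here.
*/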
191 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
192
193 /*
194 ** AIM: Adaptive Interrupt Moderation,
195 ** which means that the interrupt rate
196 ** is varied over time based on the
197 ** traffic for that interrupt vector
198 */
199 static int ixv_enable_aim = FALSE;
200 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
201
202 /* How many packets rxeof tries to clean at a time */
203 static int ixv_rx_process_limit = 256;
204 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
205
206 /* How many packets txeof tries to clean at a time */
207 static int ixv_tx_process_limit = 256;
208 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
209
210 /*
211 ** Number of TX descriptors per ring,
212 ** set higher than RX as this seems
213 ** to be the better performing choice.
214 */
215 static int ixv_txd = DEFAULT_TXD;
216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
217
218 /* Number of RX descriptors per ring */
219 static int ixv_rxd = DEFAULT_RXD;
220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
221
222 /*
223 ** Shadow VFTA table; this is needed because
224 ** the real filter table gets cleared during
225 ** a soft reset and we need to repopulate it.
226 */
227 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
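/* Each 32-bit word tracks 32 VLAN IDs: word = vid >> 5, bit = vid & 0x1F. */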
228
229 /*********************************************************************
230 * Device identification routine
231 *
232 * ixv_probe determines if the driver should be loaded on
233 * the adapter, based on the PCI vendor/device ID of the adapter.
234 *
235 * return 1 on success, 0 on failure
236 *********************************************************************/
237
238 static int
239 ixv_probe(device_t dev, cfdata_t cf, void *aux)
240 {
241 #ifdef __HAVE_PCI_MSI_MSIX
242 const struct pci_attach_args *pa = aux;
243
244 return (ixv_lookup(pa) != NULL) ? 1 : 0;
245 #else
246 return 0;
247 #endif
248 }
249
250 static ixgbe_vendor_info_t *
251 ixv_lookup(const struct pci_attach_args *pa)
252 {
253 pcireg_t subid;
254 ixgbe_vendor_info_t *ent;
255
256 INIT_DEBUGOUT("ixv_probe: begin");
257
258 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
259 return NULL;
260
261 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
262
263 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
264 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
265 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
266
267 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
269
270 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
272 return ent;
273 }
274 }
275 return NULL;
276 }
277
278
279 static void
280 ixv_sysctl_attach(struct adapter *adapter)
281 {
282 struct sysctllog **log;
283 const struct sysctlnode *rnode, *cnode;
284 device_t dev;
285
286 dev = adapter->dev;
287 log = &adapter->sysctllog;
288
289 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
290 aprint_error_dev(dev, "could not create sysctl root\n");
291 return;
292 }
293
294 if (sysctl_createv(log, 0, &rnode, &cnode,
295 CTLFLAG_READWRITE, CTLTYPE_INT,
296 "debug", SYSCTL_DESCR("Debug Info"),
297 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
298 aprint_error_dev(dev, "could not create sysctl\n");
299
300 /* XXX This is an *instance* sysctl controlling a *global* variable.
301 * XXX It's that way in the FreeBSD driver that this derives from.
302 */
303 if (sysctl_createv(log, 0, &rnode, &cnode,
304 CTLFLAG_READWRITE, CTLTYPE_INT,
305 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
306 NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
307 aprint_error_dev(dev, "could not create sysctl\n");
308 }
309
310 /*********************************************************************
311 * Device initialization routine
312 *
313 * The attach entry point is called when the driver is being loaded.
314 * This routine identifies the type of hardware, allocates all resources
315 * and initializes the hardware.
316 *
317 * return 0 on success, positive on failure
318 *********************************************************************/
319
320 static void
321 ixv_attach(device_t parent, device_t dev, void *aux)
322 {
323 struct adapter *adapter;
324 struct ixgbe_hw *hw;
325 int error = 0;
326 ixgbe_vendor_info_t *ent;
327 const struct pci_attach_args *pa = aux;
328
329 INIT_DEBUGOUT("ixv_attach: begin");
330
331 /* Allocate, clear, and link in our adapter structure */
332 adapter = device_private(dev);
333 adapter->dev = dev;
334 hw = &adapter->hw;
335
336 #ifdef DEV_NETMAP
337 adapter->init_locked = ixv_init_locked;
338 adapter->stop_locked = ixv_stop;
339 #endif
340
341 adapter->osdep.pc = pa->pa_pc;
342 adapter->osdep.tag = pa->pa_tag;
343 adapter->osdep.dmat = pa->pa_dmat;
344 adapter->osdep.attached = false;
345
346 ent = ixv_lookup(pa);
347
348 KASSERT(ent != NULL);
349
350 aprint_normal(": %s, Version - %s\n",
351 ixv_strings[ent->index], ixv_driver_version);
352
353 /* Core Lock Init*/
354 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
355
356 /* SYSCTL APIs */
357 ixv_sysctl_attach(adapter);
358
359 /* Set up the timer callout */
360 callout_init(&adapter->timer, 0);
361
362 /* Determine hardware revision */
363 ixv_identify_hardware(adapter);
364
365 /* Do base PCI setup - map BAR0 */
366 if (ixv_allocate_pci_resources(adapter, pa)) {
367 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
368 error = ENXIO;
369 goto err_out;
370 }
371
372 /* Sysctls for limiting the amount of work done in the taskqueues */
373 ixv_set_sysctl_value(adapter, "rx_processing_limit",
374 "max number of rx packets to process",
375 &adapter->rx_process_limit, ixv_rx_process_limit);
376
377 ixv_set_sysctl_value(adapter, "tx_processing_limit",
378 "max number of tx packets to process",
379 &adapter->tx_process_limit, ixv_tx_process_limit);
380
381 /* Do descriptor calc and sanity checks */
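/*
 * Each ring must span a whole number of DBA_ALIGN-byte blocks and the
 * descriptor count must lie within the MIN/MAX bounds; otherwise fall
 * back to the defaults below.
 */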
382 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
383 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
384 aprint_error_dev(dev, "TXD config issue, using default!\n");
385 adapter->num_tx_desc = DEFAULT_TXD;
386 } else
387 adapter->num_tx_desc = ixv_txd;
388
389 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
390 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
391 aprint_error_dev(dev, "RXD config issue, using default!\n");
392 adapter->num_rx_desc = DEFAULT_RXD;
393 } else
394 adapter->num_rx_desc = ixv_rxd;
395
396 /* Allocate our TX/RX Queues */
397 if (ixgbe_allocate_queues(adapter)) {
398 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
399 error = ENOMEM;
400 goto err_out;
401 }
402
403 /*
404 ** Initialize the shared code; at
405 ** this point the MAC type is set.
406 */
407 error = ixgbe_init_shared_code(hw);
408 if (error) {
409 aprint_error_dev(dev, "ixgbe_init_shared_code() failed!\n");
410 error = EIO;
411 goto err_late;
412 }
413
414 /* Setup the mailbox */
415 ixgbe_init_mbx_params_vf(hw);
416
417 /* Reset mbox api to 1.0 */
418 error = ixgbe_reset_hw(hw);
419 if (error == IXGBE_ERR_RESET_FAILED)
420 aprint_error_dev(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
421 else if (error)
422 aprint_error_dev(dev, "ixgbe_reset_hw() failed with error %d\n", error);
423 if (error) {
424 error = EIO;
425 goto err_late;
426 }
427
428 /* Negotiate mailbox API version */
429 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
430 if (error) {
431 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
432 error = EIO;
433 goto err_late;
434 }
435
436 error = ixgbe_init_hw(hw);
437 if (error) {
438 aprint_error_dev(dev, "ixgbe_init_hw() failed!\n");
439 error = EIO;
440 goto err_late;
441 }
442
443 error = ixv_allocate_msix(adapter, pa);
444 if (error) {
445 device_printf(dev, "ixv_allocate_msix() failed!\n");
446 goto err_late;
447 }
448
449 /* If no mac address was assigned, make a random one */
450 if (!ixv_check_ether_addr(hw->mac.addr)) {
451 u8 addr[ETHER_ADDR_LEN];
452 uint64_t rndval = cprng_fast64();
453
454 memcpy(addr, &rndval, sizeof(addr));
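/*
 * Turn the random value into a valid unicast LAA: clear the
 * multicast (I/G) bit and set the locally administered (U/L) bit
 * of the first octet.
 */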
455 addr[0] &= 0xFE;
456 addr[0] |= 0x02;
457 bcopy(addr, hw->mac.addr, sizeof(addr));
458 }
459
460 /* Setup OS specific network interface */
461 ixv_setup_interface(dev, adapter);
462
463 /* Do the stats setup */
464 ixv_save_stats(adapter);
465 ixv_init_stats(adapter);
466 ixv_add_stats_sysctls(adapter);
467
468 /* Register for VLAN events */
469 #if 0 /* XXX delete after write? */
470 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
471 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
472 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
473 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
474 #endif
475
476 #ifdef DEV_NETMAP
477 ixgbe_netmap_attach(adapter);
478 #endif /* DEV_NETMAP */
479 INIT_DEBUGOUT("ixv_attach: end");
480 adapter->osdep.attached = true;
481 return;
482
483 err_late:
484 ixgbe_free_transmit_structures(adapter);
485 ixgbe_free_receive_structures(adapter);
486 err_out:
487 ixv_free_pci_resources(adapter);
488 return;
489
490 }
491
492 /*********************************************************************
493 * Device removal routine
494 *
495 * The detach entry point is called when the driver is being removed.
496 * This routine stops the adapter and deallocates all the resources
497 * that were allocated for driver operation.
498 *
499 * return 0 on success, positive on failure
500 *********************************************************************/
501
502 static int
503 ixv_detach(device_t dev, int flags)
504 {
505 struct adapter *adapter = device_private(dev);
506 struct ix_queue *que = adapter->queues;
507
508 INIT_DEBUGOUT("ixv_detach: begin");
509 if (adapter->osdep.attached == false)
510 return 0;
511
512 #if NVLAN > 0
513 /* Make sure VLANS are not using driver */
514 if (!VLAN_ATTACHED(&adapter->osdep.ec))
515 ; /* nothing to do: no VLANs */
516 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
517 vlan_ifdetach(adapter->ifp);
518 else {
519 aprint_error_dev(dev, "VLANs in use, detach first\n");
520 return EBUSY;
521 }
522 #endif
523
524 IXGBE_CORE_LOCK(adapter);
525 ixv_stop(adapter);
526 IXGBE_CORE_UNLOCK(adapter);
527
528 for (int i = 0; i < adapter->num_queues; i++, que++) {
529 #ifndef IXGBE_LEGACY_TX
530 softint_disestablish(que->txr->txq_si);
531 #endif
532 softint_disestablish(que->que_si);
533 }
534
535 /* Drain the Mailbox(link) queue */
536 softint_disestablish(adapter->link_si);
537
538 /* Unregister VLAN events */
539 #if 0 /* XXX msaitoh delete after write? */
540 if (adapter->vlan_attach != NULL)
541 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
542 if (adapter->vlan_detach != NULL)
543 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
544 #endif
545
546 ether_ifdetach(adapter->ifp);
547 callout_halt(&adapter->timer, NULL);
548 #ifdef DEV_NETMAP
549 netmap_detach(adapter->ifp);
550 #endif /* DEV_NETMAP */
551 ixv_free_pci_resources(adapter);
552 #if 0 /* XXX the NetBSD port is probably missing something here */
553 bus_generic_detach(dev);
554 #endif
555 if_detach(adapter->ifp);
556
557 ixgbe_free_transmit_structures(adapter);
558 ixgbe_free_receive_structures(adapter);
559
560 IXGBE_CORE_LOCK_DESTROY(adapter);
561 return (0);
562 }
563
564 /*********************************************************************
565 *
566 * Shutdown entry point
567 *
568 **********************************************************************/
569 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
570 static int
571 ixv_shutdown(device_t dev)
572 {
573 struct adapter *adapter = device_private(dev);
574 IXGBE_CORE_LOCK(adapter);
575 ixv_stop(adapter);
576 IXGBE_CORE_UNLOCK(adapter);
577 return (0);
578 }
579 #endif
580
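/*
 * Interface flags callback, invoked from ether_ioctl() on SIOCSIFFLAGS:
 * cache the new flags and report ENETRESET when anything other than
 * IFF_CANTCHANGE or IFF_DEBUG has changed.
 */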
581 static int
582 ixv_ifflags_cb(struct ethercom *ec)
583 {
584 struct ifnet *ifp = &ec->ec_if;
585 struct adapter *adapter = ifp->if_softc;
586 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
587
588 IXGBE_CORE_LOCK(adapter);
589
590 if (change != 0)
591 adapter->if_flags = ifp->if_flags;
592
593 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
594 rc = ENETRESET;
595
596 IXGBE_CORE_UNLOCK(adapter);
597
598 return rc;
599 }
600
601 /*********************************************************************
602 * Ioctl entry point
603 *
604 * ixv_ioctl is called when the user wants to configure the
605 * interface.
606 *
607 * return 0 on success, positive on failure
608 **********************************************************************/
609
610 static int
611 ixv_ioctl(struct ifnet * ifp, u_long command, void *data)
612 {
613 struct adapter *adapter = ifp->if_softc;
614 struct ifcapreq *ifcr = data;
615 struct ifreq *ifr = (struct ifreq *) data;
616 int error = 0;
617 int l4csum_en;
618 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
619 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
620
621 switch (command) {
622 case SIOCSIFFLAGS:
623 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
624 break;
625 case SIOCADDMULTI:
626 case SIOCDELMULTI:
627 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
628 break;
629 case SIOCSIFMEDIA:
630 case SIOCGIFMEDIA:
631 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
632 break;
633 case SIOCSIFCAP:
634 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
635 break;
636 case SIOCSIFMTU:
637 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
638 break;
639 default:
640 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
641 break;
642 }
643
644 switch (command) {
645 case SIOCSIFMEDIA:
646 case SIOCGIFMEDIA:
647 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
648 case SIOCSIFCAP:
649 /* Layer-4 Rx checksum offload has to be turned on and
650 * off as a unit.
651 */
652 l4csum_en = ifcr->ifcr_capenable & l4csum;
653 if (l4csum_en != l4csum && l4csum_en != 0)
654 return EINVAL;
655 /*FALLTHROUGH*/
656 case SIOCADDMULTI:
657 case SIOCDELMULTI:
658 case SIOCSIFFLAGS:
659 case SIOCSIFMTU:
660 default:
661 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
662 return error;
663 if ((ifp->if_flags & IFF_RUNNING) == 0)
664 ;
665 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
666 IXGBE_CORE_LOCK(adapter);
667 ixv_init_locked(adapter);
668 IXGBE_CORE_UNLOCK(adapter);
669 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
670 /*
671 * Multicast list has changed; set the hardware filter
672 * accordingly.
673 */
674 IXGBE_CORE_LOCK(adapter);
675 ixv_disable_intr(adapter);
676 ixv_set_multi(adapter);
677 ixv_enable_intr(adapter);
678 IXGBE_CORE_UNLOCK(adapter);
679 }
680 return 0;
681 }
682 }
683
684 /*********************************************************************
685 * Init entry point
686 *
687 * This routine is used in two ways. It is used by the stack as
688 * the init entry point in the network interface structure. It is also used
689 * by the driver as a hw/sw initialization routine to get to a
690 * consistent state.
691 *
692 * return 0 on success, positive on failure
693 **********************************************************************/
694 #define IXGBE_MHADD_MFS_SHIFT 16
695
696 static void
697 ixv_init_locked(struct adapter *adapter)
698 {
699 struct ifnet *ifp = adapter->ifp;
700 device_t dev = adapter->dev;
701 struct ixgbe_hw *hw = &adapter->hw;
702 int error = 0;
703
704 INIT_DEBUGOUT("ixv_init_locked: begin");
705 KASSERT(mutex_owned(&adapter->core_mtx));
706 hw->adapter_stopped = FALSE;
707 ixgbe_stop_adapter(hw);
708 callout_stop(&adapter->timer);
709
710 /* reprogram the RAR[0] in case user changed it. */
711 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
712
713 /* Get the latest mac address, User can use a LAA */
714 memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
715 IXGBE_ETH_LENGTH_OF_ADDRESS);
716 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
717 hw->addr_ctrl.rar_used_count = 1;
718
719 /* Prepare transmit descriptors and buffers */
720 if (ixgbe_setup_transmit_structures(adapter)) {
721 aprint_error_dev(dev, "Could not setup transmit structures\n");
722 ixv_stop(adapter);
723 return;
724 }
725
726 /* Reset VF and renegotiate mailbox API version */
727 ixgbe_reset_hw(hw);
728 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
729 if (error)
730 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
731
732 ixv_initialize_transmit_units(adapter);
733
734 /* Setup Multicast table */
735 ixv_set_multi(adapter);
736
737 /*
738 ** Determine the correct mbuf pool
739 ** for doing jumbo/headersplit
740 */
741 if (ifp->if_mtu > ETHERMTU)
742 adapter->rx_mbuf_sz = MJUMPAGESIZE;
743 else
744 adapter->rx_mbuf_sz = MCLBYTES;
745
746 /* Prepare receive descriptors and buffers */
747 if (ixgbe_setup_receive_structures(adapter)) {
748 device_printf(dev, "Could not setup receive structures\n");
749 ixv_stop(adapter);
750 return;
751 }
752
753 /* Configure RX settings */
754 ixv_initialize_receive_units(adapter);
755
756 #if 0 /* XXX isn't it required? -- msaitoh */
757 /* Set the various hardware offload abilities */
758 ifp->if_hwassist = 0;
759 if (ifp->if_capenable & IFCAP_TSO4)
760 ifp->if_hwassist |= CSUM_TSO;
761 if (ifp->if_capenable & IFCAP_TXCSUM) {
762 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
763 #if __FreeBSD_version >= 800000
764 ifp->if_hwassist |= CSUM_SCTP;
765 #endif
766 }
767 #endif
768
769 /* Set up VLAN offload and filter */
770 ixv_setup_vlan_support(adapter);
771
772 /* Set up MSI/X routing */
773 ixv_configure_ivars(adapter);
774
775 /* Set up auto-mask */
776 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
777
778 /* Set moderation on the Link interrupt */
779 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
780
781 /* Stats init */
782 ixv_init_stats(adapter);
783
784 /* Config/Enable Link */
785 ixv_config_link(adapter);
786
787 /* Start watchdog */
788 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
789
790 /* And now turn on interrupts */
791 ixv_enable_intr(adapter);
792
793 /* Now inform the stack we're ready */
794 ifp->if_flags |= IFF_RUNNING;
795 ifp->if_flags &= ~IFF_OACTIVE;
796
797 return;
798 }
799
800 static int
801 ixv_init(struct ifnet *ifp)
802 {
803 struct adapter *adapter = ifp->if_softc;
804
805 IXGBE_CORE_LOCK(adapter);
806 ixv_init_locked(adapter);
807 IXGBE_CORE_UNLOCK(adapter);
808 return 0;
809 }
810
811
812 /*
813 **
814 ** MSIX Interrupt Handlers and Tasklets
815 **
816 */
817
818 static inline void
819 ixv_enable_queue(struct adapter *adapter, u32 vector)
820 {
821 struct ixgbe_hw *hw = &adapter->hw;
822 u32 queue = 1 << vector;
823 u32 mask;
824
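/*
 * Each queue's MSIX vector owns one bit in VTEIMS/VTEIMC; masking
 * with IXGBE_EIMS_RTX_QUEUE keeps the write within the
 * queue-interrupt bits.
 */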
825 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
826 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
827 }
828
829 static inline void
830 ixv_disable_queue(struct adapter *adapter, u32 vector)
831 {
832 struct ixgbe_hw *hw = &adapter->hw;
833 u64 queue = (u64)(1 << vector);
834 u32 mask;
835
836 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
837 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
838 }
839
840 static inline void
841 ixv_rearm_queues(struct adapter *adapter, u64 queues)
842 {
843 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
845 }
846
847
848 static void
849 ixv_handle_que(void *context)
850 {
851 struct ix_queue *que = context;
852 struct adapter *adapter = que->adapter;
853 struct tx_ring *txr = que->txr;
854 struct ifnet *ifp = adapter->ifp;
855 bool more;
856
857 if (ifp->if_flags & IFF_RUNNING) {
858 more = ixgbe_rxeof(que);
859 IXGBE_TX_LOCK(txr);
860 ixgbe_txeof(txr);
861 #if __FreeBSD_version >= 800000
862 if (!drbr_empty(ifp, txr->br))
863 ixgbe_mq_start_locked(ifp, txr);
864 #else
865 if (!IFQ_IS_EMPTY(&ifp->if_snd))
866 ixgbe_start_locked(txr, ifp);
867 #endif
868 IXGBE_TX_UNLOCK(txr);
869 if (more) {
870 adapter->req.ev_count++;
871 softint_schedule(que->que_si);
872 return;
873 }
874 }
875
876 /* Reenable this interrupt */
877 ixv_enable_queue(adapter, que->msix);
878 return;
879 }
880
881 /*********************************************************************
882 *
883 * MSIX Queue Interrupt Service routine
884 *
885 **********************************************************************/
886 int
887 ixv_msix_que(void *arg)
888 {
889 struct ix_queue *que = arg;
890 struct adapter *adapter = que->adapter;
891 struct ifnet *ifp = adapter->ifp;
892 struct tx_ring *txr = que->txr;
893 struct rx_ring *rxr = que->rxr;
894 bool more;
895 u32 newitr = 0;
896
897 ixv_disable_queue(adapter, que->msix);
898 ++que->irqs.ev_count;
899
900 more = ixgbe_rxeof(que);
901
902 IXGBE_TX_LOCK(txr);
903 ixgbe_txeof(txr);
904 /*
905 ** Make certain that if the stack
906 ** has anything queued the task gets
907 ** scheduled to handle it.
908 */
909 #ifdef IXGBE_LEGACY_TX
910 if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
911 ixgbe_start_locked(txr, ifp);
912 #else
913 if (!drbr_empty(adapter->ifp, txr->br))
914 ixgbe_mq_start_locked(ifp, txr);
915 #endif
916 IXGBE_TX_UNLOCK(txr);
917
918 /* Do AIM now? */
919
920 if (ixv_enable_aim == FALSE)
921 goto no_calc;
922 /*
923 ** Do Adaptive Interrupt Moderation:
924 ** - Write out last calculated setting
925 ** - Calculate based on average size over
926 ** the last interval.
927 */
928 if (que->eitr_setting)
929 IXGBE_WRITE_REG(&adapter->hw,
930 IXGBE_VTEITR(que->msix),
931 que->eitr_setting);
932
933 que->eitr_setting = 0;
934
935 /* Idle, do nothing */
936 if ((txr->bytes == 0) && (rxr->bytes == 0))
937 goto no_calc;
938
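/*
 * The new setting is derived from the average frame size seen on this
 * vector during the last interval: larger average frames yield a larger
 * EITR value and hence a lower interrupt rate.
 */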
939 if ((txr->bytes) && (txr->packets))
940 newitr = txr->bytes/txr->packets;
941 if ((rxr->bytes) && (rxr->packets))
942 newitr = max(newitr,
943 (rxr->bytes / rxr->packets));
944 newitr += 24; /* account for hardware frame, crc */
945
946 /* set an upper boundary */
947 newitr = min(newitr, 3000);
948
949 /* Be nice to the mid range */
950 if ((newitr > 300) && (newitr < 1200))
951 newitr = (newitr / 3);
952 else
953 newitr = (newitr / 2);
954
955 newitr |= newitr << 16;
956
957 /* save for next interrupt */
958 que->eitr_setting = newitr;
959
960 /* Reset state */
961 txr->bytes = 0;
962 txr->packets = 0;
963 rxr->bytes = 0;
964 rxr->packets = 0;
965
966 no_calc:
967 if (more)
968 softint_schedule(que->que_si);
969 else /* Reenable this interrupt */
970 ixv_enable_queue(adapter, que->msix);
971 return 1;
972 }
973
974 static int
975 ixv_msix_mbx(void *arg)
976 {
977 struct adapter *adapter = arg;
978 struct ixgbe_hw *hw = &adapter->hw;
979 u32 reg;
980
981 ++adapter->link_irq.ev_count;
982
983 /* First get the cause */
984 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
985 /* Clear interrupt with write */
986 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
987
988 /* Link status change */
989 if (reg & IXGBE_EICR_LSC)
990 softint_schedule(adapter->link_si);
991
992 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
993 return 1;
994 }
995
996 /*********************************************************************
997 *
998 * Media Ioctl callback
999 *
1000 * This routine is called whenever the user queries the status of
1001 * the interface using ifconfig.
1002 *
1003 **********************************************************************/
1004 static void
1005 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1006 {
1007 struct adapter *adapter = ifp->if_softc;
1008
1009 INIT_DEBUGOUT("ixv_media_status: begin");
1010 IXGBE_CORE_LOCK(adapter);
1011 ixv_update_link_status(adapter);
1012
1013 ifmr->ifm_status = IFM_AVALID;
1014 ifmr->ifm_active = IFM_ETHER;
1015
1016 if (!adapter->link_active) {
1017 IXGBE_CORE_UNLOCK(adapter);
1018 return;
1019 }
1020
1021 ifmr->ifm_status |= IFM_ACTIVE;
1022
1023 switch (adapter->link_speed) {
1024 case IXGBE_LINK_SPEED_1GB_FULL:
1025 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1026 break;
1027 case IXGBE_LINK_SPEED_10GB_FULL:
1028 ifmr->ifm_active |= IFM_FDX;
1029 break;
1030 }
1031
1032 IXGBE_CORE_UNLOCK(adapter);
1033
1034 return;
1035 }
1036
1037 /*********************************************************************
1038 *
1039 * Media Ioctl callback
1040 *
1041 * This routine is called when the user changes speed/duplex using
1042 * the media/mediaopt options with ifconfig.
1043 *
1044 **********************************************************************/
1045 static int
1046 ixv_media_change(struct ifnet * ifp)
1047 {
1048 struct adapter *adapter = ifp->if_softc;
1049 struct ifmedia *ifm = &adapter->media;
1050
1051 INIT_DEBUGOUT("ixv_media_change: begin");
1052
1053 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1054 return (EINVAL);
1055
1056 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1057 case IFM_AUTO:
1058 break;
1059 default:
1060 device_printf(adapter->dev, "Only auto media type\n");
1061 return (EINVAL);
1062 }
1063
1064 return (0);
1065 }
1066
1067
1068 /*********************************************************************
1069 * Multicast Update
1070 *
1071 * This routine is called whenever the multicast address list is updated.
1072 *
1073 **********************************************************************/
1074 #define IXGBE_RAR_ENTRIES 16
1075
1076 static void
1077 ixv_set_multi(struct adapter *adapter)
1078 {
1079 struct ether_multi *enm;
1080 struct ether_multistep step;
1081 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1082 u8 *update_ptr;
1083 int mcnt = 0;
1084 struct ethercom *ec = &adapter->osdep.ec;
1085
1086 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1087
1088 ETHER_FIRST_MULTI(step, ec, enm);
1089 while (enm != NULL) {
1090 bcopy(enm->enm_addrlo,
1091 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1092 IXGBE_ETH_LENGTH_OF_ADDRESS);
1093 mcnt++;
1094 /* XXX This might be required --msaitoh */
1095 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1096 break;
1097 ETHER_NEXT_MULTI(step, enm);
1098 }
1099
1100 update_ptr = mta;
1101
1102 ixgbe_update_mc_addr_list(&adapter->hw,
1103 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1104
1105 return;
1106 }
1107
1108 /*
1109 * This is an iterator function needed by the shared multicast
1110 * code. It simply feeds the shared code routine the
1111 * addresses in the array built by ixv_set_multi(), one by one.
1112 */
1113 static u8 *
1114 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1115 {
1116 u8 *addr = *update_ptr;
1117 u8 *newptr;
1118 *vmdq = 0;
1119
1120 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1121 *update_ptr = newptr;
1122 return addr;
1123 }
1124
1125 /*********************************************************************
1126 * Timer routine
1127 *
1128 * This routine checks for link status, updates statistics,
1129 * and runs the watchdog check.
1130 *
1131 **********************************************************************/
1132
1133 static void
1134 ixv_local_timer(void *arg)
1135 {
1136 struct adapter *adapter = arg;
1137
1138 IXGBE_CORE_LOCK(adapter);
1139 ixv_local_timer_locked(adapter);
1140 IXGBE_CORE_UNLOCK(adapter);
1141 }
1142
1143 static void
1144 ixv_local_timer_locked(void *arg)
1145 {
1146 struct adapter *adapter = arg;
1147 device_t dev = adapter->dev;
1148 struct ix_queue *que = adapter->queues;
1149 u64 queues = 0;
1150 int hung = 0;
1151
1152 KASSERT(mutex_owned(&adapter->core_mtx));
1153
1154 ixv_update_link_status(adapter);
1155
1156 /* Stats Update */
1157 ixv_update_stats(adapter);
1158
1159 /*
1160 ** Check the TX queues' status
1161 ** - mark hung queues so we don't schedule on them
1162 ** - watchdog only if all queues show hung
1163 */
1164 for (int i = 0; i < adapter->num_queues; i++, que++) {
1165 /* Keep track of queues with work for soft irq */
1166 if (que->txr->busy)
1167 queues |= ((u64)1 << que->me);
1168 /*
1169 ** Each time txeof runs without cleaning while there
1170 ** are uncleaned descriptors, it increments busy. If
1171 ** we reach the MAX we declare it hung.
1172 */
1173 if (que->busy == IXGBE_QUEUE_HUNG) {
1174 ++hung;
1175 /* Mark the queue as inactive */
1176 adapter->active_queues &= ~((u64)1 << que->me);
1177 continue;
1178 } else {
1179 /* Check if we've come back from hung */
1180 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1181 adapter->active_queues |= ((u64)1 << que->me);
1182 }
1183 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1184 device_printf(dev,"Warning queue %d "
1185 "appears to be hung!\n", i);
1186 que->txr->busy = IXGBE_QUEUE_HUNG;
1187 ++hung;
1188 }
1189
1190 }
1191
1192 /* Only truly watchdog if all queues show hung */
1193 if (hung == adapter->num_queues)
1194 goto watchdog;
1195 else if (queues != 0) { /* Force an IRQ on queues with work */
1196 ixv_rearm_queues(adapter, queues);
1197 }
1198
1199 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1200 return;
1201
1202 watchdog:
1203 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1204 adapter->ifp->if_flags &= ~IFF_RUNNING;
1205 adapter->watchdog_events.ev_count++;
1206 ixv_init_locked(adapter);
1207 }
1208
1209 /*
1210 ** Note: this routine updates the OS on the link state;
1211 ** the real check of the hardware only happens with
1212 ** a link interrupt.
1213 */
1214 static void
1215 ixv_update_link_status(struct adapter *adapter)
1216 {
1217 struct ifnet *ifp = adapter->ifp;
1218 device_t dev = adapter->dev;
1219
1220 if (adapter->link_up){
1221 if (adapter->link_active == FALSE) {
1222 if (bootverbose)
1223 device_printf(dev,"Link is up %d Gbps %s \n",
1224 ((adapter->link_speed == 128)? 10:1),
1225 "Full Duplex");
1226 adapter->link_active = TRUE;
1227 if_link_state_change(ifp, LINK_STATE_UP);
1228 }
1229 } else { /* Link down */
1230 if (adapter->link_active == TRUE) {
1231 if (bootverbose)
1232 device_printf(dev,"Link is Down\n");
1233 if_link_state_change(ifp, LINK_STATE_DOWN);
1234 adapter->link_active = FALSE;
1235 }
1236 }
1237
1238 return;
1239 }
1240
1241
1242 static void
1243 ixv_ifstop(struct ifnet *ifp, int disable)
1244 {
1245 struct adapter *adapter = ifp->if_softc;
1246
1247 IXGBE_CORE_LOCK(adapter);
1248 ixv_stop(adapter);
1249 IXGBE_CORE_UNLOCK(adapter);
1250 }
1251
1252 /*********************************************************************
1253 *
1254 * This routine disables all traffic on the adapter by issuing a
1255 * global reset on the MAC and deallocates TX/RX buffers.
1256 *
1257 **********************************************************************/
1258
1259 static void
1260 ixv_stop(void *arg)
1261 {
1262 struct ifnet *ifp;
1263 struct adapter *adapter = arg;
1264 struct ixgbe_hw *hw = &adapter->hw;
1265 ifp = adapter->ifp;
1266
1267 KASSERT(mutex_owned(&adapter->core_mtx));
1268
1269 INIT_DEBUGOUT("ixv_stop: begin\n");
1270 ixv_disable_intr(adapter);
1271
1272 /* Tell the stack that the interface is no longer active */
1273 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1274
1275 ixgbe_reset_hw(hw);
1276 adapter->hw.adapter_stopped = FALSE;
1277 ixgbe_stop_adapter(hw);
1278 callout_stop(&adapter->timer);
1279
1280 /* reprogram the RAR[0] in case user changed it. */
1281 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1282
1283 return;
1284 }
1285
1286
1287 /*********************************************************************
1288 *
1289 * Determine hardware revision.
1290 *
1291 **********************************************************************/
1292 static void
1293 ixv_identify_hardware(struct adapter *adapter)
1294 {
1295 pcitag_t tag;
1296 pci_chipset_tag_t pc;
1297 pcireg_t subid, id;
1298 struct ixgbe_hw *hw = &adapter->hw;
1299
1300 pc = adapter->osdep.pc;
1301 tag = adapter->osdep.tag;
1302
1303 /*
1304 ** Make sure BUSMASTER is set; on a VM under
1305 ** KVM it may not be, and that will break things.
1306 */
1307 ixgbe_pci_enable_busmaster(pc, tag);
1308
1309 id = pci_conf_read(pc, tag, PCI_ID_REG);
1310 subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
1311
1312 /* Save off the information about this board */
1313 hw->vendor_id = PCI_VENDOR(id);
1314 hw->device_id = PCI_PRODUCT(id);
1315 hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
1316 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
1317 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
1318
1319 /* We need this to determine device-specific things */
1320 ixgbe_set_mac_type(hw);
1321
1322 /* Set the right number of segments */
1323 adapter->num_segs = IXGBE_82599_SCATTER;
1324
1325 return;
1326 }
1327
1328 /*********************************************************************
1329 *
1330 * Setup MSIX Interrupt resources and handlers
1331 *
1332 **********************************************************************/
1333 static int
1334 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
1335 {
1336 device_t dev = adapter->dev;
1337 struct ix_queue *que = adapter->queues;
1338 struct tx_ring *txr = adapter->tx_rings;
1339 int error, rid, vector = 0;
1340 pci_chipset_tag_t pc;
1341 pcitag_t tag;
1342 char intrbuf[PCI_INTRSTR_LEN];
1343 const char *intrstr = NULL;
1344 kcpuset_t *affinity;
1345 int cpu_id = 0;
1346
1347 pc = adapter->osdep.pc;
1348 tag = adapter->osdep.tag;
1349
1350 if (pci_msix_alloc_exact(pa,
1351 &adapter->osdep.intrs, IXG_MSIX_NINTR) != 0)
1352 return (ENXIO);
1353
1354 kcpuset_create(&affinity, false);
1355 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1356 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
1357 sizeof(intrbuf));
1358 #ifdef IXV_MPSAFE
1359 pci_intr_setattr(pc, adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
1360 true);
1361 #endif
1362 /* Set the handler function */
1363 adapter->osdep.ihs[i] = pci_intr_establish(pc,
1364 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que);
1365 if (adapter->osdep.ihs[i] == NULL) {
1366 que->res = NULL;
1367 aprint_error_dev(dev,
1368 "Failed to register QUE handler");
1369 kcpuset_destroy(affinity);
1370 return (ENXIO);
1371 }
1372 que->msix = vector;
1373 adapter->active_queues |= (u64)(1 << que->msix);
1374
1375 cpu_id = i;
1376 /* Round-robin affinity */
1377 kcpuset_zero(affinity);
1378 kcpuset_set(affinity, cpu_id % ncpu);
1379 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
1380 NULL);
1381 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
1382 intrstr);
1383 if (error == 0)
1384 aprint_normal(", bound queue %d to cpu %d\n",
1385 i, cpu_id);
1386 else
1387 aprint_normal("\n");
1388
1389 #ifndef IXGBE_LEGACY_TX
1390 txr->txq_si = softint_establish(SOFTINT_NET,
1391 ixgbe_deferred_mq_start, txr);
1392 #endif
1393 que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que,
1394 que);
1395 if (que->que_si == NULL) {
1396 aprint_error_dev(dev,
1397 "could not establish software interrupt\n");
1398 }
1399 }
1400
1401 /* and Mailbox */
1402 cpu_id++;
1403 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
1404 sizeof(intrbuf));
1405 #ifdef IXG_MPSAFE
1406 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, true);
1407 #endif
1408 /* Set the mbx handler function */
1409 adapter->osdep.ihs[vector] = pci_intr_establish(pc,
1410 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter);
1411 if (adapter->osdep.ihs[vector] == NULL) {
1412 adapter->res = NULL;
1413 aprint_error_dev(dev, "Failed to register LINK handler\n");
1414 kcpuset_destroy(affinity);
1415 return (ENXIO);
1416 }
1417 /* Round-robin affinity */
1418 kcpuset_zero(affinity);
1419 kcpuset_set(affinity, cpu_id % ncpu);
1420 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
1421
1422 aprint_normal_dev(dev,
1423 "for link, interrupting at %s, ", intrstr);
1424 if (error == 0) {
1425 aprint_normal("affinity to cpu %d\n", cpu_id);
1426 }
1427 adapter->vector = vector;
1428 /* Tasklets for Mailbox */
1429 adapter->link_si = softint_establish(SOFTINT_NET, ixv_handle_mbx,
1430 adapter);
1431 /*
1432 ** Due to a broken design QEMU will fail to properly
1433 ** enable the guest for MSIX unless the vectors in
1434 ** the table are all set up, so we must rewrite the
1435 ** ENABLE in the MSIX control register again at this
1436 ** point to cause it to successfully initialize us.
1437 */
1438 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1439 int msix_ctrl;
1440 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
1441 rid += PCI_MSIX_CTL;
1442 msix_ctrl = pci_conf_read(pc, tag, rid);
1443 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
1444 pci_conf_write(pc, tag, rid, msix_ctrl);
1445 }
1446
1447 return (0);
1448 }
1449
1450 /*
1451 * Set up MSIX resources; note that the VF
1452 * device MUST use MSIX; there is no fallback.
1453 */
1454 static int
1455 ixv_setup_msix(struct adapter *adapter)
1456 {
1457 device_t dev = adapter->dev;
1458 int want, msgs;
1459
1460 /*
1461 ** Want two vectors: one for a queue,
1462 ** plus an additional one for the mailbox.
1463 */
1464 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
1465 if (msgs < IXG_MSIX_NINTR) {
1466 aprint_error_dev(dev,"MSIX config error\n");
1467 return (ENXIO);
1468 }
1469 want = MIN(msgs, IXG_MSIX_NINTR);
1470
1471 adapter->msix_mem = (void *)1; /* XXX */
1472 aprint_normal_dev(dev,
1473 "Using MSIX interrupts with %d vectors\n", msgs);
1474 return (want);
1475 }
1476
1477
1478 static int
1479 ixv_allocate_pci_resources(struct adapter *adapter,
1480 const struct pci_attach_args *pa)
1481 {
1482 pcireg_t memtype;
1483 device_t dev = adapter->dev;
1484 bus_addr_t addr;
1485 int flags;
1486
1487 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1488
1489 switch (memtype) {
1490 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1491 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1492 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1493 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1494 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1495 goto map_err;
1496 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1497 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1498 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1499 }
1500 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1501 adapter->osdep.mem_size, flags,
1502 &adapter->osdep.mem_bus_space_handle) != 0) {
1503 map_err:
1504 adapter->osdep.mem_size = 0;
1505 aprint_error_dev(dev, "unable to map BAR0\n");
1506 return ENXIO;
1507 }
1508 break;
1509 default:
1510 aprint_error_dev(dev, "unexpected type on BAR0\n");
1511 return ENXIO;
1512 }
1513
1514 /* Pick up the tuneable queues */
1515 adapter->num_queues = ixv_num_queues;
1516
1517 adapter->hw.back = adapter;
1518
1519 /*
1520 ** Now set up MSI/X, which should
1521 ** return the number of
1522 ** configured vectors.
1523 */
1524 adapter->msix = ixv_setup_msix(adapter);
1525 if (adapter->msix == ENXIO)
1526 return (ENXIO);
1527 else
1528 return (0);
1529 }
1530
1531 static void
1532 ixv_free_pci_resources(struct adapter * adapter)
1533 {
1534 struct ix_queue *que = adapter->queues;
1535 int rid;
1536
1537 /*
1538 ** Release all msix queue resources:
1539 */
1540 for (int i = 0; i < adapter->num_queues; i++, que++) {
1541 rid = que->msix + 1;
1542 if (que->res != NULL)
1543 pci_intr_disestablish(adapter->osdep.pc,
1544 adapter->osdep.ihs[i]);
1545 }
1546
1547
1548 /* Clean the Legacy or Link interrupt last */
1549 if (adapter->vector) /* we are doing MSIX */
1550 rid = adapter->vector + 1;
1551 else
1552 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1553
1554 if (adapter->osdep.ihs[rid] != NULL)
1555 pci_intr_disestablish(adapter->osdep.pc,
1556 adapter->osdep.ihs[rid]);
1557 adapter->osdep.ihs[rid] = NULL;
1558
1559 #if defined(NETBSD_MSI_OR_MSIX)
1560 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1561 adapter->osdep.nintrs);
1562 #endif
1563
1564 if (adapter->osdep.mem_size != 0) {
1565 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1566 adapter->osdep.mem_bus_space_handle,
1567 adapter->osdep.mem_size);
1568 }
1569
1570 return;
1571 }
1572
1573 /*********************************************************************
1574 *
1575 * Setup networking device structure and register an interface.
1576 *
1577 **********************************************************************/
1578 static void
1579 ixv_setup_interface(device_t dev, struct adapter *adapter)
1580 {
1581 struct ethercom *ec = &adapter->osdep.ec;
1582 struct ifnet *ifp;
1583
1584 INIT_DEBUGOUT("ixv_setup_interface: begin");
1585
1586 ifp = adapter->ifp = &ec->ec_if;
1587 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1588 ifp->if_baudrate = 1000000000;
1589 ifp->if_init = ixv_init;
1590 ifp->if_stop = ixv_ifstop;
1591 ifp->if_softc = adapter;
1592 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1593 ifp->if_ioctl = ixv_ioctl;
1594 #if __FreeBSD_version >= 800000
1595 ifp->if_transmit = ixgbe_mq_start;
1596 ifp->if_qflush = ixgbe_qflush;
1597 #else
1598 ifp->if_start = ixgbe_start;
1599 #endif
1600 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1601
1602 if_attach(ifp);
1603 ether_ifattach(ifp, adapter->hw.mac.addr);
1604 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1605
1606 adapter->max_frame_size =
1607 ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
1608
1609 /*
1610 * Tell the upper layer(s) we support long frames.
1611 */
1612 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1613
1614 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4;
1615 ifp->if_capenable = 0;
1616
1617 ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
1618 ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
1619 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1620 | ETHERCAP_VLAN_MTU;
1621 ec->ec_capenable = ec->ec_capabilities;
1622
1623 /* Don't enable LRO by default */
1624 ifp->if_capabilities |= IFCAP_LRO;
1625 #if 0
1626 ifp->if_capenable = ifp->if_capabilities;
1627 #endif
1628
1629 /*
1630 ** Don't turn this on by default; if vlans are
1631 ** created on another pseudo device (e.g. lagg)
1632 ** then vlan events are not passed through, breaking
1633 ** operation, but with HW FILTER off it works. If
1634 ** you use vlans directly on this driver you can
1635 ** enable this and get full hardware tag filtering.
1636 */
1637 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1638
1639 /*
1640 * Specify the media types supported by this adapter and register
1641 * callbacks to update media and link information
1642 */
1643 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1644 ixv_media_status);
1645 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1646 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1647
1648 return;
1649 }
1650
1651 static void
1652 ixv_config_link(struct adapter *adapter)
1653 {
1654 struct ixgbe_hw *hw = &adapter->hw;
1655 u32 autoneg;
1656
1657 if (hw->mac.ops.check_link)
1658 hw->mac.ops.check_link(hw, &autoneg,
1659 &adapter->link_up, FALSE);
1660 }
1661
1662
1663 /*********************************************************************
1664 *
1665 * Enable transmit unit.
1666 *
1667 **********************************************************************/
1668 static void
1669 ixv_initialize_transmit_units(struct adapter *adapter)
1670 {
1671 struct tx_ring *txr = adapter->tx_rings;
1672 struct ixgbe_hw *hw = &adapter->hw;
1673
1674
1675 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1676 u64 tdba = txr->txdma.dma_paddr;
1677 u32 txctrl, txdctl;
1678
1679 /* Set WTHRESH to 8, burst writeback */
1680 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1681 txdctl |= (8 << 16);
1682 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1683
1684 /* Set the HW Tx Head and Tail indices */
1685 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1686 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1687
1688 /* Set Tx Tail register */
1689 txr->tail = IXGBE_VFTDT(i);
1690
1691 /* Set Ring parameters */
1692 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1693 (tdba & 0x00000000ffffffffULL));
1694 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1695 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1696 adapter->num_tx_desc *
1697 sizeof(struct ixgbe_legacy_tx_desc));
1698 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1699 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1700 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1701
1702 /* Now enable */
1703 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1704 txdctl |= IXGBE_TXDCTL_ENABLE;
1705 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1706 }
1707
1708 return;
1709 }
1710
1711
1712 /*********************************************************************
1713 *
1714 * Setup receive registers and features.
1715 *
1716 **********************************************************************/
1717 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1718
1719 static void
1720 ixv_initialize_receive_units(struct adapter *adapter)
1721 {
1722 struct rx_ring *rxr = adapter->rx_rings;
1723 struct ixgbe_hw *hw = &adapter->hw;
1724 struct ifnet *ifp = adapter->ifp;
1725 u32 bufsz, rxcsum, psrtype;
1726
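/*
 * The SRRCTL BSIZEPKT field is expressed in 1KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT), so this selects 4KB receive
 * buffers for jumbo MTUs and 2KB buffers otherwise.
 */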
1727 if (ifp->if_mtu > ETHERMTU)
1728 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1729 else
1730 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1731
1732 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1733 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1734 IXGBE_PSRTYPE_L2HDR;
1735
1736 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1737
1738 /* Tell PF our max_frame size */
1739 ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
1740
1741 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1742 u64 rdba = rxr->rxdma.dma_paddr;
1743 u32 reg, rxdctl;
1744
1745 /* Disable the queue */
1746 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1747 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1748 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1749 for (int j = 0; j < 10; j++) {
1750 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1751 IXGBE_RXDCTL_ENABLE)
1752 msec_delay(1);
1753 else
1754 break;
1755 }
1756 wmb();
1757 /* Setup the Base and Length of the Rx Descriptor Ring */
1758 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1759 (rdba & 0x00000000ffffffffULL));
1760 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1761 (rdba >> 32));
1762 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1763 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1764
1765 /* Reset the ring indices */
1766 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1767 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1768
1769 /* Set up the SRRCTL register */
1770 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1771 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1772 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1773 reg |= bufsz;
1774 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1775 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1776
1777 /* Capture Rx Tail index */
1778 rxr->tail = IXGBE_VFRDT(rxr->me);
1779
1780 /* Do the queue enabling last */
1781 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1782 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1783 for (int k = 0; k < 10; k++) {
1784 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1785 IXGBE_RXDCTL_ENABLE)
1786 break;
1787 else
1788 msec_delay(1);
1789 }
1790 wmb();
1791
1792 /* Set the Tail Pointer */
1793 #ifdef DEV_NETMAP
1794 /*
1795 * In netmap mode, we must preserve the buffers made
1796 * available to userspace before the if_init()
1797 * (this is true by default on the TX side, because
1798 * init makes all buffers available to userspace).
1799 *
1800 * netmap_reset() and the device specific routines
1801 * (e.g. ixgbe_setup_receive_rings()) map these
1802 * buffers at the end of the NIC ring, so here we
1803 * must set the RDT (tail) register to make sure
1804 * they are not overwritten.
1805 *
1806 * In this driver the NIC ring starts at RDH = 0,
1807 * RDT points to the last slot available for reception (?),
1808 * so RDT = num_rx_desc - 1 means the whole ring is available.
1809 */
1810 if (ifp->if_capenable & IFCAP_NETMAP) {
1811 struct netmap_adapter *na = NA(adapter->ifp);
1812 struct netmap_kring *kring = &na->rx_rings[i];
1813 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1814
1815 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1816 } else
1817 #endif /* DEV_NETMAP */
1818 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1819 adapter->num_rx_desc - 1);
1820 }
1821
1822 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1823
1824 if (ifp->if_capenable & IFCAP_RXCSUM)
1825 rxcsum |= IXGBE_RXCSUM_PCSD;
1826
1827 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1828 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1829
1830 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1831
1832 return;
1833 }
1834
1835 static void
1836 ixv_setup_vlan_support(struct adapter *adapter)
1837 {
1838 struct ixgbe_hw *hw = &adapter->hw;
1839 u32 ctrl, vid, vfta, retry;
1840 struct rx_ring *rxr;
1841
1842 /*
1843 ** We get here through init_locked, meaning
1844 ** a soft reset; this has already cleared
1845 ** the VFTA and other state, so if no
1846 ** VLANs have been registered, do nothing.
1847 */
1848 if (!VLAN_ATTACHED(&adapter->osdep.ec))
1849 return;
1850
1851 /* Enable the queues */
1852 for (int i = 0; i < adapter->num_queues; i++) {
1853 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1854 ctrl |= IXGBE_RXDCTL_VME;
1855 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1856 /*
1857 * Let Rx path know that it needs to store VLAN tag
1858 * as part of extra mbuf info.
1859 */
1860 rxr = &adapter->rx_rings[i];
1861 rxr->vtag_strip = TRUE;
1862 }
1863
1864 /*
1865 ** A soft reset zeroes out the VFTA, so
1866 ** we need to repopulate it now.
1867 */
1868 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1869 if (ixv_shadow_vfta[i] == 0)
1870 continue;
1871 vfta = ixv_shadow_vfta[i];
1872 /*
1873 ** Reconstruct the VLAN IDs
1874 ** based on the bits set in each
1875 ** of the array words.
1876 */
1877 for (int j = 0; j < 32; j++) {
1878 retry = 0;
1879 if ((vfta & (1 << j)) == 0)
1880 continue;
1881 vid = (i * 32) + j;
1882 /* Call the shared code mailbox routine */
1883 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1884 if (++retry > 5)
1885 break;
1886 }
1887 }
1888 }
1889 }
1890
1891 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1892 /*
1893 ** This routine is run via a vlan config EVENT;
1894 ** it enables us to use the HW Filter table since
1895 ** we can get the vlan id. This just creates the
1896 ** entry in the soft version of the VFTA; init will
1897 ** repopulate the real table.
1898 */
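/*
** Worked example of the shadow-VFTA indexing used below (hypothetical
** vtag): for vtag 100, index = (100 >> 5) & 0x7F = 3 and
** bit = 100 & 0x1F = 4, so bit 4 of ixv_shadow_vfta[3] is set.
*/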
1899 static void
1900 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1901 {
1902 struct adapter *adapter = ifp->if_softc;
1903 u16 index, bit;
1904
1905 if (ifp->if_softc != arg) /* Not our event */
1906 return;
1907
1908 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1909 return;
1910
1911 IXGBE_CORE_LOCK(adapter);
1912 index = (vtag >> 5) & 0x7F;
1913 bit = vtag & 0x1F;
1914 ixv_shadow_vfta[index] |= (1 << bit);
1915 /* Re-init to load the changes */
1916 ixv_init_locked(adapter);
1917 IXGBE_CORE_UNLOCK(adapter);
1918 }
1919
1920 /*
1921 ** This routine is run via a vlan
1922 ** unconfig EVENT; it removes our entry
1923 ** from the soft VFTA.
1924 */
1925 static void
1926 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1927 {
1928 struct adapter *adapter = ifp->if_softc;
1929 u16 index, bit;
1930
1931 if (ifp->if_softc != arg)
1932 return;
1933
1934 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1935 return;
1936
1937 IXGBE_CORE_LOCK(adapter);
1938 index = (vtag >> 5) & 0x7F;
1939 bit = vtag & 0x1F;
1940 ixv_shadow_vfta[index] &= ~(1 << bit);
1941 /* Re-init to load the changes */
1942 ixv_init_locked(adapter);
1943 IXGBE_CORE_UNLOCK(adapter);
1944 }
1945 #endif
1946
1947 static void
1948 ixv_enable_intr(struct adapter *adapter)
1949 {
1950 struct ixgbe_hw *hw = &adapter->hw;
1951 struct ix_queue *que = adapter->queues;
1952 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1953 
1955 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1956
1957 mask = IXGBE_EIMS_ENABLE_MASK;
1958 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1959 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1960
1961 for (int i = 0; i < adapter->num_queues; i++, que++)
1962 ixv_enable_queue(adapter, que->msix);
1963
1964 IXGBE_WRITE_FLUSH(hw);
1965
1966 return;
1967 }
1968
1969 static void
1970 ixv_disable_intr(struct adapter *adapter)
1971 {
1972 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1973 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1974 IXGBE_WRITE_FLUSH(&adapter->hw);
1975 return;
1976 }
1977
1978 /*
1979 ** Setup the correct IVAR register for a particular MSIX interrupt
1980 ** - entry is the register array entry
1981 ** - vector is the MSIX vector for this queue
1982 ** - type is RX/TX/MISC
1983 */
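/*
** Worked example (hypothetical entry): for a TX queue (type = 1) on
** entry 3, the register touched is VTIVAR(3 >> 1) = VTIVAR(1) and the
** vector is placed at bit offset index = (16 * (3 & 1)) + (8 * 1) = 24.
*/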
1984 static void
1985 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1986 {
1987 struct ixgbe_hw *hw = &adapter->hw;
1988 u32 ivar, index;
1989
1990 vector |= IXGBE_IVAR_ALLOC_VAL;
1991
1992 if (type == -1) { /* MISC IVAR */
1993 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1994 ivar &= ~0xFF;
1995 ivar |= vector;
1996 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1997 } else { /* RX/TX IVARS */
1998 index = (16 * (entry & 1)) + (8 * type);
1999 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2000 ivar &= ~(0xFF << index);
2001 ivar |= (vector << index);
2002 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2003 }
2004 }
2005
2006 static void
2007 ixv_configure_ivars(struct adapter *adapter)
2008 {
2009 struct ix_queue *que = adapter->queues;
2010
2011 for (int i = 0; i < adapter->num_queues; i++, que++) {
2012 /* First the RX queue entry */
2013 ixv_set_ivar(adapter, i, que->msix, 0);
2014 /* ... and the TX */
2015 ixv_set_ivar(adapter, i, que->msix, 1);
2016 /* Set an initial value in EITR */
2017 IXGBE_WRITE_REG(&adapter->hw,
2018 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
2019 }
2020
2021 /* For the mailbox interrupt */
2022 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2023 }
2024
2025
2026 /*
2027 ** Tasklet handler for MSIX mailbox (MBX) interrupts
2028 **  - run outside interrupt context since it might sleep
2029 */
2030 static void
2031 ixv_handle_mbx(void *context)
2032 {
2033 struct adapter *adapter = context;
2034
2035 ixgbe_check_link(&adapter->hw,
2036 &adapter->link_speed, &adapter->link_up, 0);
2037 ixv_update_link_status(adapter);
2038 }
2039
2040 /*
2041 ** The VF stats registers never have a truly virgin
2042 ** starting point, so this routine creates an
2043 ** artificial one, marking ground zero at attach
2044 ** time.
2045 */
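/*
** Bookkeeping example (hypothetical numbers): if vfgprc reads 1000 and
** base_vfgprc was captured at 400, the 600-packet delta is folded into
** saved_reset_vfgprc so the running totals survive the reset.
*/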
2046 static void
2047 ixv_save_stats(struct adapter *adapter)
2048 {
2049 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2050
2051 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2052 stats->saved_reset_vfgprc +=
2053 stats->vfgprc.ev_count - stats->base_vfgprc;
2054 stats->saved_reset_vfgptc +=
2055 stats->vfgptc.ev_count - stats->base_vfgptc;
2056 stats->saved_reset_vfgorc +=
2057 stats->vfgorc.ev_count - stats->base_vfgorc;
2058 stats->saved_reset_vfgotc +=
2059 stats->vfgotc.ev_count - stats->base_vfgotc;
2060 stats->saved_reset_vfmprc +=
2061 stats->vfmprc.ev_count - stats->base_vfmprc;
2062 }
2063 }
2064
2065 static void
2066 ixv_init_stats(struct adapter *adapter)
2067 {
2068 struct ixgbe_hw *hw = &adapter->hw;
2069
2070 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2071 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2072 adapter->stats.vf.last_vfgorc |=
2073 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2074
2075 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2076 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2077 adapter->stats.vf.last_vfgotc |=
2078 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2079
2080 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2081
2082 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2083 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2084 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2085 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2086 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2087 }
2088
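/*
** The macros below fold the free-running VF hardware counters into
** 64-bit event counters, handling wrap of the 32-bit (and 36-bit
** LSB/MSB split) registers.  Example (hypothetical readings): if a
** 32-bit counter last read 0xFFFFFFF0 and now reads 0x00000010,
** current < last, so 0x100000000 is added before the low 32 bits are
** replaced with the new reading, preserving the accumulated total.
*/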
2089 #define UPDATE_STAT_32(reg, last, count) \
2090 { \
2091 u32 current = IXGBE_READ_REG(hw, reg); \
2092 if (current < last) \
2093 count.ev_count += 0x100000000LL; \
2094 last = current; \
2095 count.ev_count &= 0xFFFFFFFF00000000LL; \
2096 count.ev_count |= current; \
2097 }
2098
2099 #define UPDATE_STAT_36(lsb, msb, last, count) \
2100 { \
2101 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2102 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2103 u64 current = ((cur_msb << 32) | cur_lsb); \
2104 if (current < last) \
2105 count.ev_count += 0x1000000000LL; \
2106 last = current; \
2107 count.ev_count &= 0xFFFFFFF000000000LL; \
2108 count.ev_count |= current; \
2109 }
2110
2111 /*
2112 ** ixv_update_stats - Update the board statistics counters.
2113 */
2114 void
2115 ixv_update_stats(struct adapter *adapter)
2116 {
2117 struct ixgbe_hw *hw = &adapter->hw;
2118
2119 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2120 adapter->stats.vf.vfgprc);
2121 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2122 adapter->stats.vf.vfgptc);
2123 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2124 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2125 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2126 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2127 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2128 adapter->stats.vf.vfmprc);
2129 }
2130
2131 /*
2132 * Add statistic sysctls for the VF.
2133 */
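/*
 * Note: on NetBSD these statistics are exposed as event counters
 * rather than sysctl nodes; they can be inspected with, for example
 * (hypothetical device name):
 *
 *	vmstat -e | grep ixv0
 */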
2134 static void
2135 ixv_add_stats_sysctls(struct adapter *adapter)
2136 {
2137 device_t dev = adapter->dev;
2138 struct ix_queue *que = &adapter->queues[0];
2139 struct tx_ring *txr = que->txr;
2140 struct rx_ring *rxr = que->rxr;
2141
2142 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2143
2144 const char *xname = device_xname(dev);
2145
2146 /* Driver Statistics */
2147 evcnt_attach_dynamic(&adapter->dropped_pkts, EVCNT_TYPE_MISC,
2148 NULL, xname, "Driver dropped packets");
2149 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2150 NULL, xname, "m_defrag() failed");
2151 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2152 NULL, xname, "Watchdog timeouts");
2153
2154 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2155 xname, "Good Packets Received");
2156 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2157 xname, "Good Octets Received");
2158 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2159 xname, "Multicast Packets Received");
2160 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2161 xname, "Good Packets Transmitted");
2162 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2163 xname, "Good Octets Transmitted");
2164 evcnt_attach_dynamic(&que->irqs, EVCNT_TYPE_INTR, NULL,
2165 xname, "IRQs on queue");
2166 evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_INTR, NULL,
2167 xname, "RX irqs on queue");
2168 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
2169 xname, "RX packets");
2170 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, NULL,
2171 xname, "RX bytes");
2172 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, NULL,
2173 xname, "Discarded RX packets");
2174 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
2175 xname, "TX Packets");
2176 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
2177 xname, "# of times not enough descriptors were available during TX");
2178 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,
2179 xname, "TX TSO");
2180 }
2181
2182 static void
2183 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2184 const char *description, int *limit, int value)
2185 {
2186 device_t dev = adapter->dev;
2187 struct sysctllog **log;
2188 const struct sysctlnode *rnode, *cnode;
2189
2190 log = &adapter->sysctllog;
2191 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2192 aprint_error_dev(dev, "could not create sysctl root\n");
2193 return;
2194 }
2195 if (sysctl_createv(log, 0, &rnode, &cnode,
2196 CTLFLAG_READWRITE, CTLTYPE_INT,
2197 name, SYSCTL_DESCR(description),
2198 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2199 aprint_error_dev(dev, "could not create sysctl\n");
2200 *limit = value;
2201 }
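
/*
 * Illustrative use of the helper above (the tunable name and backing
 * field are hypothetical, not part of this driver): create a read/write
 * integer sysctl under the device node and seed it with a default.
 *
 *	ixv_set_sysctl_value(adapter, "example_limit",
 *	    "example tunable", &adapter->example_limit, 16);
 */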
2202
2203 /**********************************************************************
2204 *
2205  *  This routine is called only when the debug sysctl handled by
2206  *  ixv_sysctl_debug() is set to 1.  It provides a way to take a look
2207  *  at important statistics maintained by the driver and hardware.
2208 *
2209 **********************************************************************/
2210 static void
2211 ixv_print_debug_info(struct adapter *adapter)
2212 {
2213 device_t dev = adapter->dev;
2214 struct ixgbe_hw *hw = &adapter->hw;
2215 struct ix_queue *que = adapter->queues;
2216 struct rx_ring *rxr;
2217 struct tx_ring *txr;
2218 #ifdef LRO
2219 struct lro_ctrl *lro;
2220 #endif /* LRO */
2221
2222 	device_printf(dev, "Error Byte Count = %u\n",
2223 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
2224
2225 for (int i = 0; i < adapter->num_queues; i++, que++) {
2226 txr = que->txr;
2227 rxr = que->rxr;
2228 #ifdef LRO
2229 lro = &rxr->lro;
2230 #endif /* LRO */
2231 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2232 		    que->msix, (unsigned long)que->irqs.ev_count);
2233 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
2234 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
2235 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2236 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
2237 #ifdef LRO
2238 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2239 		    rxr->me, (long long)lro->lro_queued);
2240 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2241 		    rxr->me, (long long)lro->lro_flushed);
2242 #endif /* LRO */
2243 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2244 		    txr->me, (unsigned long)txr->total_packets.ev_count);
2245 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2246 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
2247 }
2248
2249 	device_printf(dev, "MBX IRQ Handled: %lu\n",
2250 	    (unsigned long)adapter->link_irq.ev_count);
2251 return;
2252 }
2253
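/*
** Handler for the debug sysctl: this follows the usual NetBSD
** sysctl(9) pattern of copying the node, pointing sysctl_data at a
** local variable, letting sysctl_lookup() perform the copy-in/out,
** and then acting on the value written (a write of 1 triggers
** ixv_print_debug_info()).
*/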
2254 static int
2255 ixv_sysctl_debug(SYSCTLFN_ARGS)
2256 {
2257 struct sysctlnode node;
2258 int error, result;
2259 struct adapter *adapter;
2260
2261 node = *rnode;
2262 adapter = (struct adapter *)node.sysctl_data;
2263 node.sysctl_data = &result;
2264 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2265
2266 if (error)
2267 return error;
2268
2269 if (result == 1)
2270 ixv_print_debug_info(adapter);
2271
2272 return 0;
2273 }
2274
2275 const struct sysctlnode *
2276 ixv_sysctl_instance(struct adapter *adapter)
2277 {
2278 const char *dvname;
2279 struct sysctllog **log;
2280 int rc;
2281 const struct sysctlnode *rnode;
2282
2283 log = &adapter->sysctllog;
2284 dvname = device_xname(adapter->dev);
2285
2286 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2287 0, CTLTYPE_NODE, dvname,
2288 SYSCTL_DESCR("ixv information and settings"),
2289 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2290 goto err;
2291
2292 return rnode;
2293 err:
2294 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2295 return NULL;
2296 }
2297
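/*
 * Illustrative sketch (not the driver's actual registration code): a
 * caller can hang a handler-backed leaf off the node returned by
 * ixv_sysctl_instance(), for example a "debug" knob serviced by
 * ixv_sysctl_debug():
 *
 *	const struct sysctlnode *rnode, *cnode;
 *
 *	if ((rnode = ixv_sysctl_instance(adapter)) != NULL)
 *		sysctl_createv(&adapter->sysctllog, 0, &rnode, &cnode,
 *		    CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
 *		    SYSCTL_DESCR("Print debug info"), ixv_sysctl_debug, 0,
 *		    (void *)adapter, 0, CTL_CREATE, CTL_EOL);
 */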