/*$NetBSD: ixv.c,v 1.82 2018/02/26 08:14:01 knakahara Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
47 /************************************************************************
48 * Driver version
49 ************************************************************************/
50 char ixv_driver_version[] = "1.5.13-k";
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/*
	 * All supported VF devices use Intel's vendor id.  The subsystem
	 * vendor/device fields are zero, which ixv_lookup() treats as
	 * wildcards, and the final index field selects the ixv_strings
	 * branding entry.
	 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
71
72 /************************************************************************
73 * Table of branding strings
74 ************************************************************************/
static const char *ixv_strings[] = {
	/* Indexed by the last field of ixv_vendor_info_array entries. */
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121 static void ixv_eitr_write(struct ix_queue *, uint32_t);
122
123 static void ixv_setup_vlan_support(struct adapter *);
124 #if 0
125 static void ixv_register_vlan(void *, struct ifnet *, u16);
126 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
127 #endif
128
129 static void ixv_add_device_sysctls(struct adapter *);
130 static void ixv_save_stats(struct adapter *);
131 static void ixv_init_stats(struct adapter *);
132 static void ixv_update_stats(struct adapter *);
133 static void ixv_add_stats_sysctls(struct adapter *);
134 static void ixv_set_sysctl_value(struct adapter *, const char *,
135 const char *, int *, int);
136
137 /* The MSI-X Interrupt handlers */
138 static int ixv_msix_que(void *);
139 static int ixv_msix_mbx(void *);
140
141 /* Deferred interrupt tasklets */
142 static void ixv_handle_que(void *);
143 static void ixv_handle_link(void *);
144
145 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
146 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
147
148 /************************************************************************
149 * FreeBSD Device Interface Entry Points
150 ************************************************************************/
151 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
152 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
153 DVF_DETACH_SHUTDOWN);
154
155 #if 0
156 static driver_t ixv_driver = {
157 "ixv", ixv_methods, sizeof(struct adapter),
158 };
159
160 devclass_t ixv_devclass;
161 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
162 MODULE_DEPEND(ixv, pci, 1, 1, 1);
163 MODULE_DEPEND(ixv, ether, 1, 1, 1);
164 #endif
165
/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * FreeBSD compatibility: TUNABLE_INT expands to nothing here, so the
 * following registrations are no-ops and the initializers above remain
 * the effective compile-time defaults.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
/* NOTE: "SOFTINFT" is a historical misspelling; kept because users of the
 * macro elsewhere in the driver spell it the same way. */
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#endif

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
221
222 /************************************************************************
223 * ixv_probe - Device identification routine
224 *
225 * Determines if the driver should be loaded on
226 * adapter based on its PCI vendor/device ID.
227 *
228 * return BUS_PROBE_DEFAULT on success, positive on failure
229 ************************************************************************/
230 static int
231 ixv_probe(device_t dev, cfdata_t cf, void *aux)
232 {
233 #ifdef __HAVE_PCI_MSI_MSIX
234 const struct pci_attach_args *pa = aux;
235
236 return (ixv_lookup(pa) != NULL) ? 1 : 0;
237 #else
238 return 0;
239 #endif
240 } /* ixv_probe */
241
242 static ixgbe_vendor_info_t *
243 ixv_lookup(const struct pci_attach_args *pa)
244 {
245 ixgbe_vendor_info_t *ent;
246 pcireg_t subid;
247
248 INIT_DEBUGOUT("ixv_lookup: begin");
249
250 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
251 return NULL;
252
253 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
254
255 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
256 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
257 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
258 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
260 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
261 (ent->subdevice_id == 0))) {
262 return ent;
263 }
264 }
265
266 return NULL;
267 }
268
269 /************************************************************************
270 * ixv_attach - Device initialization routine
271 *
272 * Called when the driver is being loaded.
273 * Identifies the type of hardware, allocates all resources
274 * and initializes the hardware.
275 *
276 * return 0 on success, positive on failure
277 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Hooks used by shared ixgbe code to (re)start and stop this VF. */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform provides one. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	/* Stays false until the very end; ixv_detach() checks it. */
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Probe already matched this device, so lookup cannot fail here. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	/* Populate feat_cap/feat_en before they are printed below. */
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/*
	 * Negotiate mailbox API version.  A failure here is reported but
	 * not fatal: the driver keeps whatever api_version was agreed.
	 */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		/* Clear multicast bit, set locally-administered bit. */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		/* NOTE(review): bcopy vs. memcpy above -- same semantics,
		 * kept as-is for byte-compatibility. */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN,MAX]; otherwise
	 * fall back to the compiled-in default.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report capability/enabled feature bitmaps in human-readable form. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

	/* Error unwind: err_late frees queue structures allocated above;
	 * err_out releases PCI resources and the core lock. */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
541
542 /************************************************************************
543 * ixv_detach - Device removal routine
544 *
545 * Called when the driver is being removed.
546 * Stops the adapter and deallocates all the resources
547 * that were allocated for driver operation.
548 *
549 * return 0 on success, positive on failure
550 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Attach never completed; nothing was set up, so nothing to undo. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* VLANs still configured and detach was not forced. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down per-queue deferred-work softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		/* txr_si only exists when multiqueue (pcq) TX is in use. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach every event counter registered by ixv_add_stats_sysctls(). */
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* txr was advanced by the softint loop above; rewind it. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	/* Destroy per-queue interrupt-mask mutexes before freeing queues. */
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->im_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
677
678 /************************************************************************
679 * ixv_init_locked - Init entry point
680 *
681 * Used in two ways: It is used by the stack as an init entry
682 * point in network interface structure. It is also used
683 * by the driver as a hw/sw initialization routine to get
684 * to a consistent state.
685 *
686 * return 0 on success, positive on failure
687 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/*
	 * Set up auto-mask: one bit per vector (mailbox vector plus each
	 * queue vector).
	 * NOTE(review): 1 << vector is a signed 32-bit shift; undefined
	 * behavior if a vector number ever reaches 31 -- confirm vector
	 * counts stay small, or use 1u here.
	 */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
802
803 /*
804 * MSI-X Interrupt Handlers and Tasklets
805 */
806
807 static inline void
808 ixv_enable_queue(struct adapter *adapter, u32 vector)
809 {
810 struct ixgbe_hw *hw = &adapter->hw;
811 struct ix_queue *que = &adapter->queues[vector];
812 u32 queue = 1 << vector;
813 u32 mask;
814
815 mutex_enter(&que->im_mtx);
816 if (que->im_nest > 0 && --que->im_nest > 0)
817 goto out;
818
819 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
820 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
821 out:
822 mutex_exit(&que->im_mtx);
823 } /* ixv_enable_queue */
824
825 static inline void
826 ixv_disable_queue(struct adapter *adapter, u32 vector)
827 {
828 struct ixgbe_hw *hw = &adapter->hw;
829 struct ix_queue *que = &adapter->queues[vector];
830 u64 queue = (u64)(1 << vector);
831 u32 mask;
832
833 mutex_enter(&que->im_mtx);
834 if (que->im_nest++ > 0)
835 goto out;
836
837 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
838 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
839 out:
840 mutex_exit(&que->im_mtx);
841 } /* ixv_disable_queue */
842
843 static inline void
844 ixv_rearm_queues(struct adapter *adapter, u64 queues)
845 {
846 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
847 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
848 } /* ixv_rearm_queues */
849
850
851 /************************************************************************
852 * ixv_msix_que - MSI Queue Interrupt Service routine
853 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this queue's interrupt while it is being serviced. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* New ITR candidate: the larger average packet size of TX/RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If RX work remains (always true on NetBSD, where rxeof is
	 * deferred), hand off to the softint; the softint path is
	 * responsible for re-enabling the queue interrupt later.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
941
942 /************************************************************************
943 * ixv_msix_mbx
944 ************************************************************************/
945 static int
946 ixv_msix_mbx(void *arg)
947 {
948 struct adapter *adapter = arg;
949 struct ixgbe_hw *hw = &adapter->hw;
950
951 ++adapter->link_irq.ev_count;
952 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
953
954 /* Link status change */
955 hw->mac.get_link_status = TRUE;
956 softint_schedule(adapter->link_si);
957
958 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
959
960 return 1;
961 } /* ixv_msix_mbx */
962
963 static void
964 ixv_eitr_write(struct ix_queue *que, uint32_t itr)
965 {
966 struct adapter *adapter = que->adapter;
967
968 /*
969 * Newer devices than 82598 have VF function, so this function is
970 * simple.
971 */
972 itr |= IXGBE_EITR_CNT_WDIS;
973
974 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), itr);
975 }
976
977
978 /************************************************************************
979 * ixv_media_status - Media Ioctl callback
980 *
981 * Called whenever the user queries the status of
982 * the interface using ifconfig.
983 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it */
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and stop here */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Map the cached link speed to an ifmedia subtype (always FDX) */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	/* Keep if_baudrate consistent with the reported media */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixv_media_status */
1031
1032 /************************************************************************
1033 * ixv_media_change - Media Ioctl callback
1034 *
1035 * Called when the user changes speed/duplex using
1036 * media/mediopt option with ifconfig.
1037 ************************************************************************/
1038 static int
1039 ixv_media_change(struct ifnet *ifp)
1040 {
1041 struct adapter *adapter = ifp->if_softc;
1042 struct ifmedia *ifm = &adapter->media;
1043
1044 INIT_DEBUGOUT("ixv_media_change: begin");
1045
1046 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1047 return (EINVAL);
1048
1049 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1050 case IFM_AUTO:
1051 break;
1052 default:
1053 device_printf(adapter->dev, "Only auto media type\n");
1054 return (EINVAL);
1055 }
1056
1057 return (0);
1058 } /* ixv_media_change */
1059
1060
1061 /************************************************************************
1062 * ixv_negotiate_api
1063 *
1064 * Negotiate the Mailbox API with the PF;
1065 * start with the most featured API first.
1066 ************************************************************************/
1067 static int
1068 ixv_negotiate_api(struct adapter *adapter)
1069 {
1070 struct ixgbe_hw *hw = &adapter->hw;
1071 int mbx_api[] = { ixgbe_mbox_api_11,
1072 ixgbe_mbox_api_10,
1073 ixgbe_mbox_api_unknown };
1074 int i = 0;
1075
1076 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1077 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1078 return (0);
1079 i++;
1080 }
1081
1082 return (EINVAL);
1083 } /* ixv_negotiate_api */
1084
1085
1086 /************************************************************************
1087 * ixv_set_multi - Multicast Update
1088 *
1089 * Called whenever multicast address list is updated.
1090 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/*
	 * Flatten the ethercom multicast list into the flat mta[] array,
	 * capped at MAX_NUM_MULTICAST_ADDRESSES entries.
	 * NOTE(review): only enm_addrlo is copied; multicast address
	 * ranges are not expanded here.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	update_ptr = mta;

	/* Hand the list to the shared code, one address per iterator call */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);

	return;
} /* ixv_set_multi */
1125
1126 /************************************************************************
1127 * ixv_mc_array_itr
1128 *
1129 * An iterator function needed by the multicast shared code.
1130 * It feeds the shared code routine the addresses in the
1131 * array of ixv_set_multi() one by one.
1132 ************************************************************************/
1133 static u8 *
1134 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1135 {
1136 u8 *addr = *update_ptr;
1137 u8 *newptr;
1138 *vmdq = 0;
1139
1140 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1141 *update_ptr = newptr;
1142
1143 return addr;
1144 } /* ixv_mc_array_itr */
1145
1146 /************************************************************************
1147 * ixv_local_timer - Timer routine
1148 *
1149 * Checks for link status, updates statistics,
1150 * and runs the watchdog check.
1151 ************************************************************************/
/* Callout entry point: take the core lock and run the timer body. */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1161
/*
 * Timer body, called with the core lock held: checks link, updates
 * statistics, runs the TX watchdog, and re-arms itself for one second
 * later unless every queue is hung (in which case the interface is
 * reset).
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmap of queues with pending TX work */
	int hung = 0;		/* number of queues declared hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): the hung marker below is written to
		 * que->txr->busy while this test reads que->busy —
		 * confirm which counter txeof actually advances.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm for another tick in one second */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* Every queue is hung: mark down and reinitialize the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1228
1229 /************************************************************************
1230 * ixv_update_link_status - Update OS on link state
1231 *
1232 * Note: Only updates the OS on the cached link state.
1233 * The real check of the hardware only happens with
1234 * a link interrupt.
1235 ************************************************************************/
1236 static void
1237 ixv_update_link_status(struct adapter *adapter)
1238 {
1239 struct ifnet *ifp = adapter->ifp;
1240 device_t dev = adapter->dev;
1241
1242 if (adapter->link_up) {
1243 if (adapter->link_active == FALSE) {
1244 if (bootverbose) {
1245 const char *bpsmsg;
1246
1247 switch (adapter->link_speed) {
1248 case IXGBE_LINK_SPEED_10GB_FULL:
1249 bpsmsg = "10 Gbps";
1250 break;
1251 case IXGBE_LINK_SPEED_5GB_FULL:
1252 bpsmsg = "5 Gbps";
1253 break;
1254 case IXGBE_LINK_SPEED_2_5GB_FULL:
1255 bpsmsg = "2.5 Gbps";
1256 break;
1257 case IXGBE_LINK_SPEED_1GB_FULL:
1258 bpsmsg = "1 Gbps";
1259 break;
1260 case IXGBE_LINK_SPEED_100_FULL:
1261 bpsmsg = "100 Mbps";
1262 break;
1263 case IXGBE_LINK_SPEED_10_FULL:
1264 bpsmsg = "10 Mbps";
1265 break;
1266 default:
1267 bpsmsg = "unknown speed";
1268 break;
1269 }
1270 device_printf(dev, "Link is up %s %s \n",
1271 bpsmsg, "Full Duplex");
1272 }
1273 adapter->link_active = TRUE;
1274 if_link_state_change(ifp, LINK_STATE_UP);
1275 }
1276 } else { /* Link down */
1277 if (adapter->link_active == TRUE) {
1278 if (bootverbose)
1279 device_printf(dev, "Link is Down\n");
1280 if_link_state_change(ifp, LINK_STATE_DOWN);
1281 adapter->link_active = FALSE;
1282 }
1283 }
1284
1285 return;
1286 } /* ixv_update_link_status */
1287
1288
1289 /************************************************************************
1290 * ixv_stop - Stop the hardware
1291 *
1292 * Disables all traffic on the adapter by issuing a
1293 * global reset on the MAC and deallocates TX/RX buffers.
1294 ************************************************************************/
/*
 * if_stop handler: take the core lock and stop the adapter.
 * The "disable" argument is ignored.
 */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1304
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask all interrupts first so no handlers race the teardown */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared here, presumably so
	 * the following stop_adapter() call is not short-circuited by
	 * the shared code — confirm against the shared-code sources.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1332
1333
1334 /************************************************************************
1335 * ixv_allocate_pci_resources
1336 ************************************************************************/
1337 static int
1338 ixv_allocate_pci_resources(struct adapter *adapter,
1339 const struct pci_attach_args *pa)
1340 {
1341 pcireg_t memtype;
1342 device_t dev = adapter->dev;
1343 bus_addr_t addr;
1344 int flags;
1345
1346 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1347 switch (memtype) {
1348 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1349 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1350 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1351 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1352 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1353 goto map_err;
1354 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1355 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1356 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1357 }
1358 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1359 adapter->osdep.mem_size, flags,
1360 &adapter->osdep.mem_bus_space_handle) != 0) {
1361 map_err:
1362 adapter->osdep.mem_size = 0;
1363 aprint_error_dev(dev, "unable to map BAR0\n");
1364 return ENXIO;
1365 }
1366 break;
1367 default:
1368 aprint_error_dev(dev, "unexpected type on BAR0\n");
1369 return ENXIO;
1370 }
1371
1372 /* Pick up the tuneable queues */
1373 adapter->num_queues = ixv_num_queues;
1374
1375 return (0);
1376 } /* ixv_allocate_pci_resources */
1377
1378 /************************************************************************
1379 * ixv_free_pci_resources
1380 ************************************************************************/
1381 static void
1382 ixv_free_pci_resources(struct adapter * adapter)
1383 {
1384 struct ix_queue *que = adapter->queues;
1385 int rid;
1386
1387 /*
1388 * Release all msix queue resources:
1389 */
1390 for (int i = 0; i < adapter->num_queues; i++, que++) {
1391 if (que->res != NULL)
1392 pci_intr_disestablish(adapter->osdep.pc,
1393 adapter->osdep.ihs[i]);
1394 }
1395
1396
1397 /* Clean the Mailbox interrupt last */
1398 rid = adapter->vector;
1399
1400 if (adapter->osdep.ihs[rid] != NULL) {
1401 pci_intr_disestablish(adapter->osdep.pc,
1402 adapter->osdep.ihs[rid]);
1403 adapter->osdep.ihs[rid] = NULL;
1404 }
1405
1406 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1407 adapter->osdep.nintrs);
1408
1409 if (adapter->osdep.mem_size != 0) {
1410 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1411 adapter->osdep.mem_bus_space_handle,
1412 adapter->osdep.mem_size);
1413 }
1414
1415 return;
1416 } /* ixv_free_pci_resources */
1417
1418 /************************************************************************
1419 * ixv_setup_interface
1420 *
1421 * Setup networking device structure and register an interface.
1422 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Basic ifnet setup: name, callbacks, flags */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* Leave two descriptors of slack in the software send queue */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (nothing enabled by default; see below) */
	ifp->if_capabilities |= IFCAP_HWCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Advertise LRO capability, but don't enable it by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1509
1510
1511 /************************************************************************
1512 * ixv_initialize_transmit_units - Enable transmit unit.
1513 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Program each TX ring: WTHRESH, base/length, then enable */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Remember the tail register so the TX path can bump it */
		txr->tail = IXGBE_VFTDT(i);

		/* Program descriptor ring base address and length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Clear the descriptor write-relaxed-ordering enable bit */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable the queue */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1555
1556
1557 /************************************************************************
1558 * ixv_initialize_rss_mapping
1559 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds (10 x 32-bit words) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, each mapping a hash
	 * value to a queue; j cycles round-robin through the queues.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush this RETA word */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the requested hash types into VFMRQC field bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1653
1654
1655 /************************************************************************
1656 * ixv_initialize_receive_units - Setup receive registers and features.
1657 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* RX buffer size: 4K for jumbo MTU, 2K otherwise */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Packet split types the hardware should recognize */
	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 set for multiqueue — confirm meaning in datasheet */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue before reprogramming it */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the disable to take effect */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: buffer size + one-buf descriptors */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to take effect */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1786
1787 /************************************************************************
1788 * ixv_setup_vlan_support
1789 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable VLAN tag stripping (VME) on every RX queue */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine to set the
			 * filter; retry up to 5 times on nonzero return.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1851
1852 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1853 /************************************************************************
1854 * ixv_register_vlan
1855 *
1856 * Run via a vlan config EVENT, it enables us to use the
1857 * HW Filter table since we can get the vlan id. This just
1858 * creates the entry in the soft version of the VFTA, init
1859 * will repopulate the real table.
1860 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* shadow_vfta is a bitmap: 32 VLAN ids per 32-bit word */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1881
1882 /************************************************************************
1883 * ixv_unregister_vlan
1884 *
1885 * Run via a vlan unconfig EVENT, remove our entry
1886 * in the soft vfta.
1887 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear this VLAN id's bit in the shadow bitmap */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
1908 #endif
1909
1910 /************************************************************************
1911 * ixv_enable_intr
1912 ************************************************************************/
1913 static void
1914 ixv_enable_intr(struct adapter *adapter)
1915 {
1916 struct ixgbe_hw *hw = &adapter->hw;
1917 struct ix_queue *que = adapter->queues;
1918 u32 mask;
1919 int i;
1920
1921 /* For VTEIAC */
1922 mask = (1 << adapter->vector);
1923 for (i = 0; i < adapter->num_queues; i++, que++)
1924 mask |= (1 << que->msix);
1925 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1926
1927 /* For VTEIMS */
1928 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
1929 que = adapter->queues;
1930 for (i = 0; i < adapter->num_queues; i++, que++)
1931 ixv_enable_queue(adapter, que->msix);
1932
1933 IXGBE_WRITE_FLUSH(hw);
1934
1935 return;
1936 } /* ixv_enable_intr */
1937
1938 /************************************************************************
1939 * ixv_disable_intr
1940 ************************************************************************/
1941 static void
1942 ixv_disable_intr(struct adapter *adapter)
1943 {
1944 struct ix_queue *que = adapter->queues;
1945
1946 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1947
1948 /* disable interrupts other than queues */
1949 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
1950
1951 for (int i = 0; i < adapter->num_queues; i++, que++)
1952 ixv_disable_queue(adapter, que->msix);
1953
1954 IXGBE_WRITE_FLUSH(&adapter->hw);
1955
1956 return;
1957 } /* ixv_disable_intr */
1958
1959 /************************************************************************
1960 * ixv_set_ivar
1961 *
1962 * Setup the correct IVAR register for a particular MSI-X interrupt
1963 * - entry is the register array entry
1964 * - vector is the MSI-X vector for this queue
1965 * - type is RX/TX/MISC
1966 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the IVAR entry as valid */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* The misc (mailbox) vector lives in the low byte */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
		/*
		 * Each 32-bit IVAR register holds two queues' entries:
		 * byte offset = 16 * (odd/even queue) + 8 * (RX=0/TX=1).
		 * Read-modify-write just that byte.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
1988
1989 /************************************************************************
1990 * ixv_configure_ivars
1991 ************************************************************************/
1992 static void
1993 ixv_configure_ivars(struct adapter *adapter)
1994 {
1995 struct ix_queue *que = adapter->queues;
1996
1997 /* XXX We should sync EITR value calculation with ixgbe.c? */
1998
1999 for (int i = 0; i < adapter->num_queues; i++, que++) {
2000 /* First the RX queue entry */
2001 ixv_set_ivar(adapter, i, que->msix, 0);
2002 /* ... and the TX */
2003 ixv_set_ivar(adapter, i, que->msix, 1);
2004 /* Set an initial value in EITR */
2005 ixv_eitr_write(que, IXGBE_EITR_DEFAULT);
2006 }
2007
2008 /* For the mailbox interrupt */
2009 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2010 } /* ixv_configure_ivars */
2011
2012
2013 /************************************************************************
2014 * ixv_save_stats
2015 *
2016 * The VF stats registers never have a truly virgin
2017 * starting point, so this routine tries to make an
2018 * artificial one, marking ground zero on attach as
2019 * it were.
2020 ************************************************************************/
2021 static void
2022 ixv_save_stats(struct adapter *adapter)
2023 {
2024 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2025
2026 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2027 stats->saved_reset_vfgprc +=
2028 stats->vfgprc.ev_count - stats->base_vfgprc;
2029 stats->saved_reset_vfgptc +=
2030 stats->vfgptc.ev_count - stats->base_vfgptc;
2031 stats->saved_reset_vfgorc +=
2032 stats->vfgorc.ev_count - stats->base_vfgorc;
2033 stats->saved_reset_vfgotc +=
2034 stats->vfgotc.ev_count - stats->base_vfgotc;
2035 stats->saved_reset_vfmprc +=
2036 stats->vfmprc.ev_count - stats->base_vfmprc;
2037 }
2038 } /* ixv_save_stats */
2039
2040 /************************************************************************
2041 * ixv_init_stats
2042 ************************************************************************/
2043 static void
2044 ixv_init_stats(struct adapter *adapter)
2045 {
2046 struct ixgbe_hw *hw = &adapter->hw;
2047
2048 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2049 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2050 adapter->stats.vf.last_vfgorc |=
2051 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2052
2053 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2054 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2055 adapter->stats.vf.last_vfgotc |=
2056 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2057
2058 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2059
2060 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2061 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2062 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2063 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2064 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2065 } /* ixv_init_stats */
2066
/*
 * Fold the current value of a 32-bit hardware counter into a 64-bit
 * event counter, accounting for at most one wraparound since the last
 * read: the low 32 bits mirror the register, the high bits count wraps.
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe in an unbraced if/else); arguments parenthesized.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2076
/*
 * Same as UPDATE_STAT_32 but for the 36-bit octet counters, which are
 * read as an LSB/MSB register pair; a wrap adds 2^36 and the low 36
 * bits mirror the hardware value.  do { } while (0) for single-
 * statement macro hygiene.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2088
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 *
 *   Reads each VF hardware counter and folds it into the corresponding
 *   64-bit evcnt via the UPDATE_STAT_* macros (which handle register
 *   wraparound).  Uses the file-scope "hw" name expected by the macros.
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters, split across LSB/MSB registers */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2113
2114 const struct sysctlnode *
2115 ixv_sysctl_instance(struct adapter *adapter)
2116 {
2117 const char *dvname;
2118 struct sysctllog **log;
2119 int rc;
2120 const struct sysctlnode *rnode;
2121
2122 log = &adapter->sysctllog;
2123 dvname = device_xname(adapter->dev);
2124
2125 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2126 0, CTLTYPE_NODE, dvname,
2127 SYSCTL_DESCR("ixv information and settings"),
2128 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2129 goto err;
2130
2131 return rnode;
2132 err:
2133 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2134 return NULL;
2135 }
2136
2137 static void
2138 ixv_add_device_sysctls(struct adapter *adapter)
2139 {
2140 struct sysctllog **log;
2141 const struct sysctlnode *rnode, *cnode;
2142 device_t dev;
2143
2144 dev = adapter->dev;
2145 log = &adapter->sysctllog;
2146
2147 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2148 aprint_error_dev(dev, "could not create sysctl root\n");
2149 return;
2150 }
2151
2152 if (sysctl_createv(log, 0, &rnode, &cnode,
2153 CTLFLAG_READWRITE, CTLTYPE_INT,
2154 "debug", SYSCTL_DESCR("Debug Info"),
2155 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2156 aprint_error_dev(dev, "could not create sysctl\n");
2157
2158 if (sysctl_createv(log, 0, &rnode, &cnode,
2159 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2160 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2161 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2162 aprint_error_dev(dev, "could not create sysctl\n");
2163 }
2164
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 *   Attaches evcnt(9) counters for driver-level events, per-queue
 *   TX/RX events, MAC checksum-offload stats, VF traffic counters and
 *   mailbox statistics, and creates per-queue sysctl nodes.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue counters and sysctl nodes */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Create a "qN" node under the device root for this queue */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		/* Per-queue TX event counters */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		/* Per-queue RX event counters */
		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2350
2351 /************************************************************************
2352 * ixv_set_sysctl_value
2353 ************************************************************************/
2354 static void
2355 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2356 const char *description, int *limit, int value)
2357 {
2358 device_t dev = adapter->dev;
2359 struct sysctllog **log;
2360 const struct sysctlnode *rnode, *cnode;
2361
2362 log = &adapter->sysctllog;
2363 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2364 aprint_error_dev(dev, "could not create sysctl root\n");
2365 return;
2366 }
2367 if (sysctl_createv(log, 0, &rnode, &cnode,
2368 CTLFLAG_READWRITE, CTLTYPE_INT,
2369 name, SYSCTL_DESCR(description),
2370 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2371 aprint_error_dev(dev, "could not create sysctl\n");
2372 *limit = value;
2373 } /* ixv_set_sysctl_value */
2374
2375 /************************************************************************
2376 * ixv_print_debug_info
2377 *
2378 * Called only when em_display_debug_stats is enabled.
2379 * Provides a way to take a look at important statistics
2380 * maintained by the driver and hardware.
2381 ************************************************************************/
2382 static void
2383 ixv_print_debug_info(struct adapter *adapter)
2384 {
2385 device_t dev = adapter->dev;
2386 struct ixgbe_hw *hw = &adapter->hw;
2387 struct ix_queue *que = adapter->queues;
2388 struct rx_ring *rxr;
2389 struct tx_ring *txr;
2390 #ifdef LRO
2391 struct lro_ctrl *lro;
2392 #endif /* LRO */
2393
2394 device_printf(dev, "Error Byte Count = %u \n",
2395 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2396
2397 for (int i = 0; i < adapter->num_queues; i++, que++) {
2398 txr = que->txr;
2399 rxr = que->rxr;
2400 #ifdef LRO
2401 lro = &rxr->lro;
2402 #endif /* LRO */
2403 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2404 que->msix, (long)que->irqs.ev_count);
2405 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2406 rxr->me, (long long)rxr->rx_packets.ev_count);
2407 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2408 rxr->me, (long)rxr->rx_bytes.ev_count);
2409 #ifdef LRO
2410 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2411 rxr->me, (long long)lro->lro_queued);
2412 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2413 rxr->me, (long long)lro->lro_flushed);
2414 #endif /* LRO */
2415 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2416 txr->me, (long)txr->total_packets.ev_count);
2417 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2418 txr->me, (long)txr->no_desc_avail.ev_count);
2419 }
2420
2421 device_printf(dev, "MBX IRQ Handled: %lu\n",
2422 (long)adapter->link_irq.ev_count);
2423 } /* ixv_print_debug_info */
2424
2425 /************************************************************************
2426 * ixv_sysctl_debug
2427 ************************************************************************/
2428 static int
2429 ixv_sysctl_debug(SYSCTLFN_ARGS)
2430 {
2431 struct sysctlnode node;
2432 struct adapter *adapter;
2433 int error, result;
2434
2435 node = *rnode;
2436 node.sysctl_data = &result;
2437 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2438
2439 if (error || newp == NULL)
2440 return error;
2441
2442 if (result == 1) {
2443 adapter = (struct adapter *)node.sysctl_data;
2444 ixv_print_debug_info(adapter);
2445 }
2446
2447 return 0;
2448 } /* ixv_sysctl_debug */
2449
2450 /************************************************************************
2451 * ixv_init_device_features
2452 ************************************************************************/
2453 static void
2454 ixv_init_device_features(struct adapter *adapter)
2455 {
2456 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2457 | IXGBE_FEATURE_VF
2458 | IXGBE_FEATURE_RSS
2459 | IXGBE_FEATURE_LEGACY_TX;
2460
2461 /* A tad short on feature flags for VFs, atm. */
2462 switch (adapter->hw.mac.type) {
2463 case ixgbe_mac_82599_vf:
2464 break;
2465 case ixgbe_mac_X540_vf:
2466 break;
2467 case ixgbe_mac_X550_vf:
2468 case ixgbe_mac_X550EM_x_vf:
2469 case ixgbe_mac_X550EM_a_vf:
2470 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2471 break;
2472 default:
2473 break;
2474 }
2475
2476 /* Enabled by default... */
2477 /* Is a virtual function (VF) */
2478 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2479 adapter->feat_en |= IXGBE_FEATURE_VF;
2480 /* Netmap */
2481 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2482 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2483 /* Receive-Side Scaling (RSS) */
2484 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2485 adapter->feat_en |= IXGBE_FEATURE_RSS;
2486 /* Needs advanced context descriptor regardless of offloads req'd */
2487 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2488 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2489
2490 /* Enabled via sysctl... */
2491 /* Legacy (single queue) transmit */
2492 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2493 ixv_enable_legacy_tx)
2494 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2495 } /* ixv_init_device_features */
2496
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	/* Quiesce the interface under the core lock before power-off */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2512
2513 static int
2514 ixv_ifflags_cb(struct ethercom *ec)
2515 {
2516 struct ifnet *ifp = &ec->ec_if;
2517 struct adapter *adapter = ifp->if_softc;
2518 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2519
2520 IXGBE_CORE_LOCK(adapter);
2521
2522 if (change != 0)
2523 adapter->if_flags = ifp->if_flags;
2524
2525 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2526 rc = ENETRESET;
2527
2528 /* Set up VLAN support and filter */
2529 ixv_setup_vlan_support(adapter);
2530
2531 IXGBE_CORE_UNLOCK(adapter);
2532
2533 return rc;
2534 }
2535
2536
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* The L4 RX checksum-offload capability bits, treated as a unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code run first; only act when
		 * it asks for a reset (ENETRESET). */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* interface down: nothing to apply now */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes need a full reinit */
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2617
2618 /************************************************************************
2619 * ixv_init
2620 ************************************************************************/
2621 static int
2622 ixv_init(struct ifnet *ifp)
2623 {
2624 struct adapter *adapter = ifp->if_softc;
2625
2626 IXGBE_CORE_LOCK(adapter);
2627 ixv_init_locked(adapter);
2628 IXGBE_CORE_UNLOCK(adapter);
2629
2630 return 0;
2631 } /* ixv_init */
2632
2633
/************************************************************************
 * ixv_handle_que
 *
 *   Softint handler for a queue: drain RX and TX completions, restart
 *   any pending transmit, and either reschedule itself (more work
 *   pending) or re-enable the queue's MSI-X interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue path: kick the per-queue pcq if non-empty */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* Work remains: requeue ourselves, keep IRQ masked */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2673
2674 /************************************************************************
2675 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2676 ************************************************************************/
2677 static int
2678 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2679 {
2680 device_t dev = adapter->dev;
2681 struct ix_queue *que = adapter->queues;
2682 struct tx_ring *txr = adapter->tx_rings;
2683 int error, msix_ctrl, rid, vector = 0;
2684 pci_chipset_tag_t pc;
2685 pcitag_t tag;
2686 char intrbuf[PCI_INTRSTR_LEN];
2687 char intr_xname[32];
2688 const char *intrstr = NULL;
2689 kcpuset_t *affinity;
2690 int cpu_id = 0;
2691
2692 pc = adapter->osdep.pc;
2693 tag = adapter->osdep.tag;
2694
2695 adapter->osdep.nintrs = adapter->num_queues + 1;
2696 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2697 adapter->osdep.nintrs) != 0) {
2698 aprint_error_dev(dev,
2699 "failed to allocate MSI-X interrupt\n");
2700 return (ENXIO);
2701 }
2702
2703 kcpuset_create(&affinity, false);
2704 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2705 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2706 device_xname(dev), i);
2707 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2708 sizeof(intrbuf));
2709 #ifdef IXGBE_MPSAFE
2710 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2711 true);
2712 #endif
2713 /* Set the handler function */
2714 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2715 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2716 intr_xname);
2717 if (que->res == NULL) {
2718 pci_intr_release(pc, adapter->osdep.intrs,
2719 adapter->osdep.nintrs);
2720 aprint_error_dev(dev,
2721 "Failed to register QUE handler\n");
2722 kcpuset_destroy(affinity);
2723 return (ENXIO);
2724 }
2725 que->msix = vector;
2726 adapter->active_queues |= (u64)(1 << que->msix);
2727
2728 cpu_id = i;
2729 /* Round-robin affinity */
2730 kcpuset_zero(affinity);
2731 kcpuset_set(affinity, cpu_id % ncpu);
2732 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2733 NULL);
2734 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2735 intrstr);
2736 if (error == 0)
2737 aprint_normal(", bound queue %d to cpu %d\n",
2738 i, cpu_id % ncpu);
2739 else
2740 aprint_normal("\n");
2741
2742 #ifndef IXGBE_LEGACY_TX
2743 txr->txr_si
2744 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2745 ixgbe_deferred_mq_start, txr);
2746 #endif
2747 que->que_si
2748 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2749 ixv_handle_que, que);
2750 if (que->que_si == NULL) {
2751 aprint_error_dev(dev,
2752 "could not establish software interrupt\n");
2753 }
2754 }
2755
2756 /* and Mailbox */
2757 cpu_id++;
2758 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2759 adapter->vector = vector;
2760 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2761 sizeof(intrbuf));
2762 #ifdef IXGBE_MPSAFE
2763 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2764 true);
2765 #endif
2766 /* Set the mbx handler function */
2767 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2768 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2769 intr_xname);
2770 if (adapter->osdep.ihs[vector] == NULL) {
2771 adapter->res = NULL;
2772 aprint_error_dev(dev, "Failed to register LINK handler\n");
2773 kcpuset_destroy(affinity);
2774 return (ENXIO);
2775 }
2776 /* Round-robin affinity */
2777 kcpuset_zero(affinity);
2778 kcpuset_set(affinity, cpu_id % ncpu);
2779 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2780
2781 aprint_normal_dev(dev,
2782 "for link, interrupting at %s", intrstr);
2783 if (error == 0)
2784 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2785 else
2786 aprint_normal("\n");
2787
2788 /* Tasklets for Mailbox */
2789 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2790 ixv_handle_link, adapter);
2791 /*
2792 * Due to a broken design QEMU will fail to properly
2793 * enable the guest for MSI-X unless the vectors in
2794 * the table are all set up, so we must rewrite the
2795 * ENABLE in the MSI-X control register again at this
2796 * point to cause it to successfully initialize us.
2797 */
2798 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2799 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
2800 rid += PCI_MSIX_CTL;
2801 msix_ctrl = pci_conf_read(pc, tag, rid);
2802 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
2803 pci_conf_write(pc, tag, rid, msix_ctrl);
2804 }
2805
2806 kcpuset_destroy(affinity);
2807 return (0);
2808 } /* ixv_allocate_msix */
2809
2810 /************************************************************************
2811 * ixv_configure_interrupts - Setup MSI-X resources
2812 *
2813 * Note: The VF device MUST use MSI-X, there is no fallback.
2814 ************************************************************************/
2815 static int
2816 ixv_configure_interrupts(struct adapter *adapter)
2817 {
2818 device_t dev = adapter->dev;
2819 int want, queues, msgs;
2820
2821 /* Must have at least 2 MSI-X vectors */
2822 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2823 if (msgs < 2) {
2824 aprint_error_dev(dev, "MSIX config error\n");
2825 return (ENXIO);
2826 }
2827 msgs = MIN(msgs, IXG_MAX_NINTR);
2828
2829 /* Figure out a reasonable auto config value */
2830 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2831
2832 if (ixv_num_queues != 0)
2833 queues = ixv_num_queues;
2834 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2835 queues = IXGBE_VF_MAX_TX_QUEUES;
2836
2837 /*
2838 * Want vectors for the queues,
2839 * plus an additional for mailbox.
2840 */
2841 want = queues + 1;
2842 if (msgs >= want)
2843 msgs = want;
2844 else {
2845 aprint_error_dev(dev,
2846 "MSI-X Configuration Problem, "
2847 "%d vectors but %d queues wanted!\n",
2848 msgs, want);
2849 return -1;
2850 }
2851
2852 adapter->msix_mem = (void *)1; /* XXX */
2853 aprint_normal_dev(dev,
2854 "Using MSI-X interrupts with %d vectors\n", msgs);
2855 adapter->num_queues = queues;
2856
2857 return (0);
2858 } /* ixv_configure_interrupts */
2859
2860
2861 /************************************************************************
2862 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2863 *
2864 * Done outside of interrupt context since the driver might sleep
2865 ************************************************************************/
2866 static void
2867 ixv_handle_link(void *context)
2868 {
2869 struct adapter *adapter = context;
2870
2871 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2872 &adapter->link_up, FALSE);
2873 ixv_update_link_status(adapter);
2874 } /* ixv_handle_link */
2875
2876 /************************************************************************
2877 * ixv_check_link - Used in the local timer to poll for link changes
2878 ************************************************************************/
2879 static void
2880 ixv_check_link(struct adapter *adapter)
2881 {
2882 adapter->hw.mac.get_link_status = TRUE;
2883
2884 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2885 &adapter->link_up, FALSE);
2886 ixv_update_link_status(adapter);
2887 } /* ixv_check_link */
2888