ixv.c revision 1.79 1 /*$NetBSD: ixv.c,v 1.79 2018/02/16 10:11:21 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 * Used by probe to select devices to load on
 * Last field stores an index into ixv_strings
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 * Indexed by the String Index field of ixv_vendor_info_array.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121
122 static void ixv_setup_vlan_support(struct adapter *);
123 #if 0
124 static void ixv_register_vlan(void *, struct ifnet *, u16);
125 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
126 #endif
127
128 static void ixv_add_device_sysctls(struct adapter *);
129 static void ixv_save_stats(struct adapter *);
130 static void ixv_init_stats(struct adapter *);
131 static void ixv_update_stats(struct adapter *);
132 static void ixv_add_stats_sysctls(struct adapter *);
133 static void ixv_set_sysctl_value(struct adapter *, const char *,
134 const char *, int *, int);
135
136 /* The MSI-X Interrupt handlers */
137 static int ixv_msix_que(void *);
138 static int ixv_msix_mbx(void *);
139
140 /* Deferred interrupt tasklets */
141 static void ixv_handle_que(void *);
142 static void ixv_handle_link(void *);
143
144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
146
/************************************************************************
 * Device Interface Entry Points (NetBSD autoconf attachment)
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0	/* FreeBSD newbus registration, kept for reference */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * TUNABLE_INT is a FreeBSD-ism; it is defined away to nothing here
 * so the TUNABLE_INT() lines below compile to no-ops on NetBSD while
 * keeping the diff against the FreeBSD driver small.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * Flags passed to callout_init()/softint_establish() so the driver
 * runs without the kernel lock when the kernel is built with
 * NET_MPSAFE.
 * NOTE(review): "SOFTINFT" looks like a typo for "SOFTINT" —
 * presumably the same spelling is used at every softint_establish()
 * call site in this file; verify before renaming.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#endif

#if 0	/* FreeBSD legacy-TX dispatch pointers, unused on NetBSD */
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
220
221 /************************************************************************
222 * ixv_probe - Device identification routine
223 *
224 * Determines if the driver should be loaded on
225 * adapter based on its PCI vendor/device ID.
226 *
227 * return BUS_PROBE_DEFAULT on success, positive on failure
228 ************************************************************************/
229 static int
230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
231 {
232 #ifdef __HAVE_PCI_MSI_MSIX
233 const struct pci_attach_args *pa = aux;
234
235 return (ixv_lookup(pa) != NULL) ? 1 : 0;
236 #else
237 return 0;
238 #endif
239 } /* ixv_probe */
240
241 static ixgbe_vendor_info_t *
242 ixv_lookup(const struct pci_attach_args *pa)
243 {
244 ixgbe_vendor_info_t *ent;
245 pcireg_t subid;
246
247 INIT_DEBUGOUT("ixv_lookup: begin");
248
249 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
250 return NULL;
251
252 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
253
254 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
255 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
256 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
257 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
258 (ent->subvendor_id == 0)) &&
259 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
260 (ent->subdevice_id == 0))) {
261 return ent;
262 }
263 }
264
265 return NULL;
266 }
267
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 * Called when the driver is being loaded.
 * Identifies the type of hardware, allocates all resources
 * and initializes the hardware.
 *
 * NB: autoconf attach routines return void; failures are reported
 * via aprint_error_dev() and any partially-initialized state is
 * unwound at the err_late/err_out labels.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Hooks the shared ixgbe code uses to (re)start/stop this VF */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Prefer the 64-bit DMA tag when the platform provides one */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot be NULL: ixv_probe() matched using the same table */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version (non-fatal: log and continue) */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		/* Clear the multicast bit, set the locally-administered bit */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must
	 * be DBA_ALIGN-aligned and the count within [MIN,MAX]; fall back
	 * to defaults otherwise.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Print capable/enabled feature flag sets */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attach complete so ixv_detach() knows there is work to undo */
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; release them before the PCI resources */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
540
/************************************************************************
 * ixv_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* ixv_attach() sets this flag only on full success */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse detach while VLAN interfaces are configured */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for any pending timer invocation to finish */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach every event counter registered at attach time */
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue counters: rewind txr, it was advanced by the loop above */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
672
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init entry
 * point in network interface structure. It is also used
 * by the driver as a hw/sw initialization routine to get
 * to a consistent state.
 *
 * Must be called with the core lock held; returns void (errors
 * are reported and the adapter is stopped via ixv_stop()).
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the hardware and stop the watchdog before reconfiguring */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version (non-fatal) */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: one bit per queue vector plus the mbox vector */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
797
798 /*
799 * MSI-X Interrupt Handlers and Tasklets
800 */
801
802 static inline void
803 ixv_enable_queue(struct adapter *adapter, u32 vector)
804 {
805 struct ixgbe_hw *hw = &adapter->hw;
806 u32 queue = 1 << vector;
807 u32 mask;
808
809 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
810 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
811 } /* ixv_enable_queue */
812
813 static inline void
814 ixv_disable_queue(struct adapter *adapter, u32 vector)
815 {
816 struct ixgbe_hw *hw = &adapter->hw;
817 u64 queue = (u64)(1 << vector);
818 u32 mask;
819
820 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
821 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
822 } /* ixv_disable_queue */
823
824 static inline void
825 ixv_rearm_queues(struct adapter *adapter, u64 queues)
826 {
827 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
828 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
829 } /* ixv_rearm_queues */
830
831
/************************************************************************
 * ixv_msix_que - MSI Queue Interrupt Service routine
 *
 * Masks the queue's vector, reaps TX completions, and either
 * schedules the deferred softint (RX work pending) or re-enables
 * the vector.  Optionally recomputes the interrupt-throttle (AIM).
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until processing completes */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Estimate average packet size per direction; take the larger */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Replicate the value into the upper 16 bits of the register */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset byte/packet accounting for the next interval */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
913
/************************************************************************
 * ixv_msix_mbx - Mailbox (link) MSI-X interrupt handler
 *
 * Marks the link status stale and defers the actual mailbox/link
 * processing to softint context.
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox/link vector */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
934
/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it */
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Map the negotiated link speed to an ifmedia subtype */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixv_media_status */
988
989 /************************************************************************
990 * ixv_media_change - Media Ioctl callback
991 *
992 * Called when the user changes speed/duplex using
993 * media/mediopt option with ifconfig.
994 ************************************************************************/
995 static int
996 ixv_media_change(struct ifnet *ifp)
997 {
998 struct adapter *adapter = ifp->if_softc;
999 struct ifmedia *ifm = &adapter->media;
1000
1001 INIT_DEBUGOUT("ixv_media_change: begin");
1002
1003 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1004 return (EINVAL);
1005
1006 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1007 case IFM_AUTO:
1008 break;
1009 default:
1010 device_printf(adapter->dev, "Only auto media type\n");
1011 return (EINVAL);
1012 }
1013
1014 return (0);
1015 } /* ixv_media_change */
1016
1017
1018 /************************************************************************
1019 * ixv_negotiate_api
1020 *
1021 * Negotiate the Mailbox API with the PF;
1022 * start with the most featured API first.
1023 ************************************************************************/
1024 static int
1025 ixv_negotiate_api(struct adapter *adapter)
1026 {
1027 struct ixgbe_hw *hw = &adapter->hw;
1028 int mbx_api[] = { ixgbe_mbox_api_11,
1029 ixgbe_mbox_api_10,
1030 ixgbe_mbox_api_unknown };
1031 int i = 0;
1032
1033 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1034 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1035 return (0);
1036 i++;
1037 }
1038
1039 return (EINVAL);
1040 } /* ixv_negotiate_api */
1041
1042
1043 /************************************************************************
1044 * ixv_set_multi - Multicast Update
1045 *
1046 * Called whenever multicast address list is updated.
1047 ************************************************************************/
1048 static void
1049 ixv_set_multi(struct adapter *adapter)
1050 {
1051 struct ether_multi *enm;
1052 struct ether_multistep step;
1053 struct ethercom *ec = &adapter->osdep.ec;
1054 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1055 u8 *update_ptr;
1056 int mcnt = 0;
1057
1058 KASSERT(mutex_owned(&adapter->core_mtx));
1059 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1060
1061 ETHER_LOCK(ec);
1062 ETHER_FIRST_MULTI(step, ec, enm);
1063 while (enm != NULL) {
1064 bcopy(enm->enm_addrlo,
1065 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1066 IXGBE_ETH_LENGTH_OF_ADDRESS);
1067 mcnt++;
1068 /* XXX This might be required --msaitoh */
1069 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1070 break;
1071 ETHER_NEXT_MULTI(step, enm);
1072 }
1073 ETHER_UNLOCK(ec);
1074
1075 update_ptr = mta;
1076
1077 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1078 ixv_mc_array_itr, TRUE);
1079
1080 return;
1081 } /* ixv_set_multi */
1082
1083 /************************************************************************
1084 * ixv_mc_array_itr
1085 *
1086 * An iterator function needed by the multicast shared code.
1087 * It feeds the shared code routine the addresses in the
1088 * array of ixv_set_multi() one by one.
1089 ************************************************************************/
1090 static u8 *
1091 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1092 {
1093 u8 *addr = *update_ptr;
1094 u8 *newptr;
1095 *vmdq = 0;
1096
1097 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1098 *update_ptr = newptr;
1099
1100 return addr;
1101 } /* ixv_mc_array_itr */
1102
1103 /************************************************************************
1104 * ixv_local_timer - Timer routine
1105 *
1106 * Checks for link status, updates statistics,
1107 * and runs the watchdog check.
1108 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Acquire the core lock and run the real timer body. */
	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1118
/*
 * Core-locked timer body (runs once per second): checks link state,
 * updates statistics, and runs the per-queue TX watchdog.  Reschedules
 * itself via callout_reset() unless every queue is hung, in which case
 * the interface is re-initialized.  Caller must hold the core lock.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;	/* bitmask of queues with TX work pending */
	int hung = 0;	/* number of queues currently marked hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		 * NOTE(review): the hung test above reads que->busy while
		 * this threshold path sets que->txr->busy — confirm which
		 * counter the TX cleanup path actually increments.
		 */
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* Every queue is hung: count the event and reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1185
1186 /************************************************************************
1187 * ixv_update_link_status - Update OS on link state
1188 *
1189 * Note: Only updates the OS on the cached link state.
1190 * The real check of the hardware only happens with
1191 * a link interrupt.
1192 ************************************************************************/
1193 static void
1194 ixv_update_link_status(struct adapter *adapter)
1195 {
1196 struct ifnet *ifp = adapter->ifp;
1197 device_t dev = adapter->dev;
1198
1199 if (adapter->link_up) {
1200 if (adapter->link_active == FALSE) {
1201 if (bootverbose) {
1202 const char *bpsmsg;
1203
1204 switch (adapter->link_speed) {
1205 case IXGBE_LINK_SPEED_10GB_FULL:
1206 bpsmsg = "10 Gbps";
1207 break;
1208 case IXGBE_LINK_SPEED_5GB_FULL:
1209 bpsmsg = "5 Gbps";
1210 break;
1211 case IXGBE_LINK_SPEED_2_5GB_FULL:
1212 bpsmsg = "2.5 Gbps";
1213 break;
1214 case IXGBE_LINK_SPEED_1GB_FULL:
1215 bpsmsg = "1 Gbps";
1216 break;
1217 case IXGBE_LINK_SPEED_100_FULL:
1218 bpsmsg = "100 Mbps";
1219 break;
1220 case IXGBE_LINK_SPEED_10_FULL:
1221 bpsmsg = "10 Mbps";
1222 break;
1223 default:
1224 bpsmsg = "unknown speed";
1225 break;
1226 }
1227 device_printf(dev, "Link is up %s %s \n",
1228 bpsmsg, "Full Duplex");
1229 }
1230 adapter->link_active = TRUE;
1231 if_link_state_change(ifp, LINK_STATE_UP);
1232 }
1233 } else { /* Link down */
1234 if (adapter->link_active == TRUE) {
1235 if (bootverbose)
1236 device_printf(dev, "Link is Down\n");
1237 if_link_state_change(ifp, LINK_STATE_DOWN);
1238 adapter->link_active = FALSE;
1239 }
1240 }
1241
1242 return;
1243 } /* ixv_update_link_status */
1244
1245
1246 /************************************************************************
1247 * ixv_stop - Stop the hardware
1248 *
1249 * Disables all traffic on the adapter by issuing a
1250 * global reset on the MAC and deallocates TX/RX buffers.
1251 ************************************************************************/
1252 static void
1253 ixv_ifstop(struct ifnet *ifp, int disable)
1254 {
1255 struct adapter *adapter = ifp->if_softc;
1256
1257 IXGBE_CORE_LOCK(adapter);
1258 ixv_stop(adapter);
1259 IXGBE_CORE_UNLOCK(adapter);
1260 }
1261
1262 static void
1263 ixv_stop(void *arg)
1264 {
1265 struct ifnet *ifp;
1266 struct adapter *adapter = arg;
1267 struct ixgbe_hw *hw = &adapter->hw;
1268
1269 ifp = adapter->ifp;
1270
1271 KASSERT(mutex_owned(&adapter->core_mtx));
1272
1273 INIT_DEBUGOUT("ixv_stop: begin\n");
1274 ixv_disable_intr(adapter);
1275
1276 /* Tell the stack that the interface is no longer active */
1277 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1278
1279 hw->mac.ops.reset_hw(hw);
1280 adapter->hw.adapter_stopped = FALSE;
1281 hw->mac.ops.stop_adapter(hw);
1282 callout_stop(&adapter->timer);
1283
1284 /* reprogram the RAR[0] in case user changed it. */
1285 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1286
1287 return;
1288 } /* ixv_stop */
1289
1290
1291 /************************************************************************
1292 * ixv_allocate_pci_resources
1293 ************************************************************************/
/*
 * Map BAR0 (the device register window) and record the tuneable queue
 * count.  Returns 0 on success, or ENXIO if BAR0 has an unexpected
 * type or cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing to unmap" on free. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1334
1335 /************************************************************************
1336 * ixv_free_pci_resources
1337 ************************************************************************/
1338 static void
1339 ixv_free_pci_resources(struct adapter * adapter)
1340 {
1341 struct ix_queue *que = adapter->queues;
1342 int rid;
1343
1344 /*
1345 * Release all msix queue resources:
1346 */
1347 for (int i = 0; i < adapter->num_queues; i++, que++) {
1348 if (que->res != NULL)
1349 pci_intr_disestablish(adapter->osdep.pc,
1350 adapter->osdep.ihs[i]);
1351 }
1352
1353
1354 /* Clean the Mailbox interrupt last */
1355 rid = adapter->vector;
1356
1357 if (adapter->osdep.ihs[rid] != NULL) {
1358 pci_intr_disestablish(adapter->osdep.pc,
1359 adapter->osdep.ihs[rid]);
1360 adapter->osdep.ihs[rid] = NULL;
1361 }
1362
1363 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1364 adapter->osdep.nintrs);
1365
1366 if (adapter->osdep.mem_size != 0) {
1367 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1368 adapter->osdep.mem_bus_space_handle,
1369 adapter->osdep.mem_size);
1370 }
1371
1372 return;
1373 } /* ixv_free_pci_resources */
1374
1375 /************************************************************************
1376 * ixv_setup_interface
1377 *
1378 * Setup networking device structure and register an interface.
1379 ************************************************************************/
/*
 * Populate the ifnet/ethercom structures, register the interface with
 * the network stack, and declare the supported capabilities and media
 * types.  Returns 0 on success or the error from if_initialize().
 */
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Use multiqueue transmit unless the legacy-TX feature is forced. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags (advertised, but start with none enabled) */
	ifp->if_capabilities |= IFCAP_HWCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1466
1467
1468 /************************************************************************
1469 * ixv_initialize_transmit_units - Enable transmit unit.
1470 ************************************************************************/
/*
 * Program the VF TX queue registers for every ring: write-back
 * threshold, head/tail indices, ring base/length, descriptor
 * write-back ordering, then enable the ring.
 */
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering for descriptor write-back. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1512
1513
1514 /************************************************************************
1515 * ixv_initialize_rss_mapping
1516 ************************************************************************/
/*
 * Program the VF RSS registers: the 10-word hash key (VFRSSRK),
 * the 64-entry redirection table (VFRETA, packed four 8-bit queue
 * ids per register), and the hash-type field selection (VFMRQC).
 */
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush one VFRETA word. */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash configuration into VFMRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1610
1611
1612 /************************************************************************
1613 * ixv_initialize_receive_units - Setup receive registers and features.
1614 ************************************************************************/
/*
 * Program the VF RX queue registers for every ring: buffer sizing,
 * packet-split type, PF frame-size negotiation, ring base/length,
 * SRRCTL, enable, and tail pointer (netmap aware).  Finishes with
 * RSS mapping and checksum configuration.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Larger RX buffers when jumbo frames are possible. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	/* Bit 29 is set with multiple queues — presumably the RSS-enable
	 * bit of VFPSRTYPE; confirm against the 82599 VF datasheet. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1743
1744 /************************************************************************
1745 * ixv_setup_vlan_support
1746 ************************************************************************/
/*
 * Re-enable VLAN tag stripping on every RX queue and repopulate the
 * hardware VLAN filter table (via the PF mailbox) from the shadow
 * copy after a soft reset.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Mailbox may be busy; retry up to 5 times. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1808
1809 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1810 /************************************************************************
1811 * ixv_register_vlan
1812 *
1813 * Run via a vlan config EVENT, it enables us to use the
1814 * HW Filter table since we can get the vlan id. This just
1815 * creates the entry in the soft version of the VFTA, init
1816 * will repopulate the real table.
1817 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* shadow_vfta is an array of 32-bit words: word index + bit offset. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1838
1839 /************************************************************************
1840 * ixv_unregister_vlan
1841 *
1842 * Run via a vlan unconfig EVENT, remove our entry
1843 * in the soft vfta.
1844 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the matching bit in the shadow table (inverse of register). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
1865 #endif
1866
1867 /************************************************************************
1868 * ixv_enable_intr
1869 ************************************************************************/
1870 static void
1871 ixv_enable_intr(struct adapter *adapter)
1872 {
1873 struct ixgbe_hw *hw = &adapter->hw;
1874 struct ix_queue *que = adapter->queues;
1875 u32 mask;
1876 int i;
1877
1878 /* For VTEIAC */
1879 mask = (1 << adapter->vector);
1880 for (i = 0; i < adapter->num_queues; i++, que++)
1881 mask |= (1 << que->msix);
1882 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1883
1884 /* For VTEIMS */
1885 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
1886 que = adapter->queues;
1887 for (i = 0; i < adapter->num_queues; i++, que++)
1888 ixv_enable_queue(adapter, que->msix);
1889
1890 IXGBE_WRITE_FLUSH(hw);
1891
1892 return;
1893 } /* ixv_enable_intr */
1894
1895 /************************************************************************
1896 * ixv_disable_intr
1897 ************************************************************************/
1898 static void
1899 ixv_disable_intr(struct adapter *adapter)
1900 {
1901 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1903 IXGBE_WRITE_FLUSH(&adapter->hw);
1904
1905 return;
1906 } /* ixv_disable_intr */
1907
1908 /************************************************************************
1909 * ixv_set_ivar
1910 *
1911 * Setup the correct IVAR register for a particular MSI-X interrupt
1912 * - entry is the register array entry
1913 * - vector is the MSI-X vector for this queue
1914 * - type is RX/TX/MISC
1915 ************************************************************************/
1916 static void
1917 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1918 {
1919 struct ixgbe_hw *hw = &adapter->hw;
1920 u32 ivar, index;
1921
1922 vector |= IXGBE_IVAR_ALLOC_VAL;
1923
1924 if (type == -1) { /* MISC IVAR */
1925 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1926 ivar &= ~0xFF;
1927 ivar |= vector;
1928 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1929 } else { /* RX/TX IVARS */
1930 index = (16 * (entry & 1)) + (8 * type);
1931 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1932 ivar &= ~(0xFF << index);
1933 ivar |= (vector << index);
1934 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1935 }
1936 } /* ixv_set_ivar */
1937
1938 /************************************************************************
1939 * ixv_configure_ivars
1940 ************************************************************************/
1941 static void
1942 ixv_configure_ivars(struct adapter *adapter)
1943 {
1944 struct ix_queue *que = adapter->queues;
1945
1946 for (int i = 0; i < adapter->num_queues; i++, que++) {
1947 /* First the RX queue entry */
1948 ixv_set_ivar(adapter, i, que->msix, 0);
1949 /* ... and the TX */
1950 ixv_set_ivar(adapter, i, que->msix, 1);
1951 /* Set an initial value in EITR */
1952 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1953 IXGBE_EITR_DEFAULT);
1954 }
1955
1956 /* For the mailbox interrupt */
1957 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1958 } /* ixv_configure_ivars */
1959
1960
1961 /************************************************************************
1962 * ixv_save_stats
1963 *
1964 * The VF stats registers never have a truly virgin
1965 * starting point, so this routine tries to make an
1966 * artificial one, marking ground zero on attach as
1967 * it were.
1968 ************************************************************************/
1969 static void
1970 ixv_save_stats(struct adapter *adapter)
1971 {
1972 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1973
1974 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1975 stats->saved_reset_vfgprc +=
1976 stats->vfgprc.ev_count - stats->base_vfgprc;
1977 stats->saved_reset_vfgptc +=
1978 stats->vfgptc.ev_count - stats->base_vfgptc;
1979 stats->saved_reset_vfgorc +=
1980 stats->vfgorc.ev_count - stats->base_vfgorc;
1981 stats->saved_reset_vfgotc +=
1982 stats->vfgotc.ev_count - stats->base_vfgotc;
1983 stats->saved_reset_vfmprc +=
1984 stats->vfmprc.ev_count - stats->base_vfmprc;
1985 }
1986 } /* ixv_save_stats */
1987
1988 /************************************************************************
1989 * ixv_init_stats
1990 ************************************************************************/
1991 static void
1992 ixv_init_stats(struct adapter *adapter)
1993 {
1994 struct ixgbe_hw *hw = &adapter->hw;
1995
1996 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1997 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1998 adapter->stats.vf.last_vfgorc |=
1999 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2000
2001 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2002 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2003 adapter->stats.vf.last_vfgotc |=
2004 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2005
2006 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2007
2008 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2009 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2010 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2011 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2012 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2013 } /* ixv_init_stats */
2014
/*
 * Refresh a 32-bit VF statistics counter from its hardware register,
 * compensating for register wraparound.  Requires a variable "hw"
 * (struct ixgbe_hw *) in the caller's scope.  The low 32 bits of the
 * event counter mirror the register; the upper bits count wraps.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe after if/else without braces); the bare-brace form
 * would break on "if (x) UPDATE_STAT_32(...); else ...".
 */
#define UPDATE_STAT_32(reg, last, count)			\
do {								\
	u32 current = IXGBE_READ_REG(hw, (reg));		\
	if (current < (last))					\
		(count).ev_count += 0x100000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFFF00000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)
2024
/*
 * Refresh a 36-bit VF statistics counter split across two hardware
 * registers (4 MSB bits + 32 LSB bits), compensating for wraparound
 * at 2^36.  Requires "hw" (struct ixgbe_hw *) in the caller's scope.
 *
 * do { } while (0) makes the macro a single statement; see
 * UPDATE_STAT_32 for why the bare-brace form is unsafe.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)			\
do {								\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));		\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));		\
	u64 current = ((cur_msb << 32) | cur_lsb);		\
	if (current < (last))					\
		(count).ev_count += 0x1000000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFF000000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)
2036
2037 /************************************************************************
2038 * ixv_update_stats - Update the board statistics counters.
2039 ************************************************************************/
2040 void
2041 ixv_update_stats(struct adapter *adapter)
2042 {
2043 struct ixgbe_hw *hw = &adapter->hw;
2044 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2045
2046 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2047 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2048 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2049 stats->vfgorc);
2050 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2051 stats->vfgotc);
2052 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2053
2054 /* Fill out the OS statistics structure */
2055 /*
2056 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
2057 * adapter->stats counters. It's required to make ifconfig -z
2058 * (SOICZIFDATA) work.
2059 */
2060 } /* ixv_update_stats */
2061
2062 const struct sysctlnode *
2063 ixv_sysctl_instance(struct adapter *adapter)
2064 {
2065 const char *dvname;
2066 struct sysctllog **log;
2067 int rc;
2068 const struct sysctlnode *rnode;
2069
2070 log = &adapter->sysctllog;
2071 dvname = device_xname(adapter->dev);
2072
2073 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2074 0, CTLTYPE_NODE, dvname,
2075 SYSCTL_DESCR("ixv information and settings"),
2076 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2077 goto err;
2078
2079 return rnode;
2080 err:
2081 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2082 return NULL;
2083 }
2084
2085 static void
2086 ixv_add_device_sysctls(struct adapter *adapter)
2087 {
2088 struct sysctllog **log;
2089 const struct sysctlnode *rnode, *cnode;
2090 device_t dev;
2091
2092 dev = adapter->dev;
2093 log = &adapter->sysctllog;
2094
2095 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2096 aprint_error_dev(dev, "could not create sysctl root\n");
2097 return;
2098 }
2099
2100 if (sysctl_createv(log, 0, &rnode, &cnode,
2101 CTLFLAG_READWRITE, CTLTYPE_INT,
2102 "debug", SYSCTL_DESCR("Debug Info"),
2103 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2104 aprint_error_dev(dev, "could not create sysctl\n");
2105
2106 if (sysctl_createv(log, 0, &rnode, &cnode,
2107 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2108 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2109 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2110 aprint_error_dev(dev, "could not create sysctl\n");
2111 }
2112
2113 /************************************************************************
2114 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2115 ************************************************************************/
2116 static void
2117 ixv_add_stats_sysctls(struct adapter *adapter)
2118 {
2119 device_t dev = adapter->dev;
2120 struct tx_ring *txr = adapter->tx_rings;
2121 struct rx_ring *rxr = adapter->rx_rings;
2122 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2123 struct ixgbe_hw *hw = &adapter->hw;
2124 const struct sysctlnode *rnode;
2125 struct sysctllog **log = &adapter->sysctllog;
2126 const char *xname = device_xname(dev);
2127
2128 /* Driver Statistics */
2129 evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
2130 NULL, xname, "Handled queue in softint");
2131 evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
2132 NULL, xname, "Requeued in softint");
2133 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2134 NULL, xname, "Driver tx dma soft fail EFBIG");
2135 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2136 NULL, xname, "m_defrag() failed");
2137 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2138 NULL, xname, "Driver tx dma hard fail EFBIG");
2139 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2140 NULL, xname, "Driver tx dma hard fail EINVAL");
2141 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2142 NULL, xname, "Driver tx dma hard fail other");
2143 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2144 NULL, xname, "Driver tx dma soft fail EAGAIN");
2145 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2146 NULL, xname, "Driver tx dma soft fail ENOMEM");
2147 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2148 NULL, xname, "Watchdog timeouts");
2149 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2150 NULL, xname, "TSO errors");
2151 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
2152 NULL, xname, "Link MSI-X IRQ Handled");
2153
2154 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2155 snprintf(adapter->queues[i].evnamebuf,
2156 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2157 xname, i);
2158 snprintf(adapter->queues[i].namebuf,
2159 sizeof(adapter->queues[i].namebuf), "q%d", i);
2160
2161 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2162 aprint_error_dev(dev, "could not create sysctl root\n");
2163 break;
2164 }
2165
2166 if (sysctl_createv(log, 0, &rnode, &rnode,
2167 0, CTLTYPE_NODE,
2168 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2169 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2170 break;
2171
2172 #if 0 /* not yet */
2173 if (sysctl_createv(log, 0, &rnode, &cnode,
2174 CTLFLAG_READWRITE, CTLTYPE_INT,
2175 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2176 ixgbe_sysctl_interrupt_rate_handler, 0,
2177 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2178 break;
2179
2180 if (sysctl_createv(log, 0, &rnode, &cnode,
2181 CTLFLAG_READONLY, CTLTYPE_QUAD,
2182 "irqs", SYSCTL_DESCR("irqs on this queue"),
2183 NULL, 0, &(adapter->queues[i].irqs),
2184 0, CTL_CREATE, CTL_EOL) != 0)
2185 break;
2186
2187 if (sysctl_createv(log, 0, &rnode, &cnode,
2188 CTLFLAG_READONLY, CTLTYPE_INT,
2189 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2190 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
2191 0, CTL_CREATE, CTL_EOL) != 0)
2192 break;
2193
2194 if (sysctl_createv(log, 0, &rnode, &cnode,
2195 CTLFLAG_READONLY, CTLTYPE_INT,
2196 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2197 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
2198 0, CTL_CREATE, CTL_EOL) != 0)
2199 break;
2200 #endif
2201 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2202 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2203 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2204 NULL, adapter->queues[i].evnamebuf, "TSO");
2205 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2206 NULL, adapter->queues[i].evnamebuf,
2207 "Queue No Descriptor Available");
2208 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2209 NULL, adapter->queues[i].evnamebuf,
2210 "Queue Packets Transmitted");
2211 #ifndef IXGBE_LEGACY_TX
2212 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2213 NULL, adapter->queues[i].evnamebuf,
2214 "Packets dropped in pcq");
2215 #endif
2216
2217 #ifdef LRO
2218 struct lro_ctrl *lro = &rxr->lro;
2219 #endif /* LRO */
2220
2221 #if 0 /* not yet */
2222 if (sysctl_createv(log, 0, &rnode, &cnode,
2223 CTLFLAG_READONLY,
2224 CTLTYPE_INT,
2225 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
2226 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
2227 CTL_CREATE, CTL_EOL) != 0)
2228 break;
2229
2230 if (sysctl_createv(log, 0, &rnode, &cnode,
2231 CTLFLAG_READONLY,
2232 CTLTYPE_INT,
2233 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
2234 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
2235 CTL_CREATE, CTL_EOL) != 0)
2236 break;
2237 #endif
2238
2239 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2240 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
2241 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2242 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
2243 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2244 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2245 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2246 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2247 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2248 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2249 #ifdef LRO
2250 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2251 CTLFLAG_RD, &lro->lro_queued, 0,
2252 "LRO Queued");
2253 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2254 CTLFLAG_RD, &lro->lro_flushed, 0,
2255 "LRO Flushed");
2256 #endif /* LRO */
2257 }
2258
2259 /* MAC stats get their own sub node */
2260
2261 snprintf(stats->namebuf,
2262 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2263
2264 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2265 stats->namebuf, "rx csum offload - IP");
2266 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2267 stats->namebuf, "rx csum offload - L4");
2268 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2269 stats->namebuf, "rx csum offload - IP bad");
2270 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2271 stats->namebuf, "rx csum offload - L4 bad");
2272
2273 /* Packet Reception Stats */
2274 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2275 xname, "Good Packets Received");
2276 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2277 xname, "Good Octets Received");
2278 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2279 xname, "Multicast Packets Received");
2280 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2281 xname, "Good Packets Transmitted");
2282 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2283 xname, "Good Octets Transmitted");
2284
2285 /* Mailbox Stats */
2286 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2287 xname, "message TXs");
2288 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2289 xname, "message RXs");
2290 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2291 xname, "ACKs");
2292 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2293 xname, "REQs");
2294 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2295 xname, "RSTs");
2296
2297 } /* ixv_add_stats_sysctls */
2298
2299 /************************************************************************
2300 * ixv_set_sysctl_value
2301 ************************************************************************/
2302 static void
2303 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2304 const char *description, int *limit, int value)
2305 {
2306 device_t dev = adapter->dev;
2307 struct sysctllog **log;
2308 const struct sysctlnode *rnode, *cnode;
2309
2310 log = &adapter->sysctllog;
2311 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2312 aprint_error_dev(dev, "could not create sysctl root\n");
2313 return;
2314 }
2315 if (sysctl_createv(log, 0, &rnode, &cnode,
2316 CTLFLAG_READWRITE, CTLTYPE_INT,
2317 name, SYSCTL_DESCR(description),
2318 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2319 aprint_error_dev(dev, "could not create sysctl\n");
2320 *limit = value;
2321 } /* ixv_set_sysctl_value */
2322
2323 /************************************************************************
2324 * ixv_print_debug_info
2325 *
2326 * Called only when em_display_debug_stats is enabled.
2327 * Provides a way to take a look at important statistics
2328 * maintained by the driver and hardware.
2329 ************************************************************************/
2330 static void
2331 ixv_print_debug_info(struct adapter *adapter)
2332 {
2333 device_t dev = adapter->dev;
2334 struct ixgbe_hw *hw = &adapter->hw;
2335 struct ix_queue *que = adapter->queues;
2336 struct rx_ring *rxr;
2337 struct tx_ring *txr;
2338 #ifdef LRO
2339 struct lro_ctrl *lro;
2340 #endif /* LRO */
2341
2342 device_printf(dev, "Error Byte Count = %u \n",
2343 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2344
2345 for (int i = 0; i < adapter->num_queues; i++, que++) {
2346 txr = que->txr;
2347 rxr = que->rxr;
2348 #ifdef LRO
2349 lro = &rxr->lro;
2350 #endif /* LRO */
2351 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2352 que->msix, (long)que->irqs.ev_count);
2353 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2354 rxr->me, (long long)rxr->rx_packets.ev_count);
2355 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2356 rxr->me, (long)rxr->rx_bytes.ev_count);
2357 #ifdef LRO
2358 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2359 rxr->me, (long long)lro->lro_queued);
2360 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2361 rxr->me, (long long)lro->lro_flushed);
2362 #endif /* LRO */
2363 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2364 txr->me, (long)txr->total_packets.ev_count);
2365 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2366 txr->me, (long)txr->no_desc_avail.ev_count);
2367 }
2368
2369 device_printf(dev, "MBX IRQ Handled: %lu\n",
2370 (long)adapter->link_irq.ev_count);
2371 } /* ixv_print_debug_info */
2372
2373 /************************************************************************
2374 * ixv_sysctl_debug
2375 ************************************************************************/
2376 static int
2377 ixv_sysctl_debug(SYSCTLFN_ARGS)
2378 {
2379 struct sysctlnode node;
2380 struct adapter *adapter;
2381 int error, result;
2382
2383 node = *rnode;
2384 node.sysctl_data = &result;
2385 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2386
2387 if (error || newp == NULL)
2388 return error;
2389
2390 if (result == 1) {
2391 adapter = (struct adapter *)node.sysctl_data;
2392 ixv_print_debug_info(adapter);
2393 }
2394
2395 return 0;
2396 } /* ixv_sysctl_debug */
2397
2398 /************************************************************************
2399 * ixv_init_device_features
2400 ************************************************************************/
2401 static void
2402 ixv_init_device_features(struct adapter *adapter)
2403 {
2404 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2405 | IXGBE_FEATURE_VF
2406 | IXGBE_FEATURE_RSS
2407 | IXGBE_FEATURE_LEGACY_TX;
2408
2409 /* A tad short on feature flags for VFs, atm. */
2410 switch (adapter->hw.mac.type) {
2411 case ixgbe_mac_82599_vf:
2412 break;
2413 case ixgbe_mac_X540_vf:
2414 break;
2415 case ixgbe_mac_X550_vf:
2416 case ixgbe_mac_X550EM_x_vf:
2417 case ixgbe_mac_X550EM_a_vf:
2418 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2419 break;
2420 default:
2421 break;
2422 }
2423
2424 /* Enabled by default... */
2425 /* Is a virtual function (VF) */
2426 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2427 adapter->feat_en |= IXGBE_FEATURE_VF;
2428 /* Netmap */
2429 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2430 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2431 /* Receive-Side Scaling (RSS) */
2432 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2433 adapter->feat_en |= IXGBE_FEATURE_RSS;
2434 /* Needs advanced context descriptor regardless of offloads req'd */
2435 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2436 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2437
2438 /* Enabled via sysctl... */
2439 /* Legacy (single queue) transmit */
2440 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2441 ixv_enable_legacy_tx)
2442 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2443 } /* ixv_init_device_features */
2444
2445 /************************************************************************
2446 * ixv_shutdown - Shutdown entry point
2447 ************************************************************************/
2448 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2449 static int
2450 ixv_shutdown(device_t dev)
2451 {
2452 struct adapter *adapter = device_private(dev);
2453 IXGBE_CORE_LOCK(adapter);
2454 ixv_stop(adapter);
2455 IXGBE_CORE_UNLOCK(adapter);
2456
2457 return (0);
2458 } /* ixv_shutdown */
2459 #endif
2460
2461 static int
2462 ixv_ifflags_cb(struct ethercom *ec)
2463 {
2464 struct ifnet *ifp = &ec->ec_if;
2465 struct adapter *adapter = ifp->if_softc;
2466 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2467
2468 IXGBE_CORE_LOCK(adapter);
2469
2470 if (change != 0)
2471 adapter->if_flags = ifp->if_flags;
2472
2473 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2474 rc = ENETRESET;
2475
2476 /* Set up VLAN support and filter */
2477 ixv_setup_vlan_support(adapter);
2478
2479 IXGBE_CORE_UNLOCK(adapter);
2480
2481 return rc;
2482 }
2483
2484
2485 /************************************************************************
2486 * ixv_ioctl - Ioctl entry point
2487 *
2488 * Called when the user wants to configure the interface.
2489 *
2490 * return 0 on success, positive on failure
2491 ************************************************************************/
2492 static int
2493 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2494 {
2495 struct adapter *adapter = ifp->if_softc;
2496 struct ifcapreq *ifcr = data;
2497 struct ifreq *ifr = data;
2498 int error = 0;
2499 int l4csum_en;
2500 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
2501 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
2502
2503 switch (command) {
2504 case SIOCSIFFLAGS:
2505 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2506 break;
2507 case SIOCADDMULTI:
2508 case SIOCDELMULTI:
2509 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2510 break;
2511 case SIOCSIFMEDIA:
2512 case SIOCGIFMEDIA:
2513 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2514 break;
2515 case SIOCSIFCAP:
2516 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2517 break;
2518 case SIOCSIFMTU:
2519 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2520 break;
2521 default:
2522 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
2523 break;
2524 }
2525
2526 switch (command) {
2527 case SIOCSIFMEDIA:
2528 case SIOCGIFMEDIA:
2529 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2530 case SIOCSIFCAP:
2531 /* Layer-4 Rx checksum offload has to be turned on and
2532 * off as a unit.
2533 */
2534 l4csum_en = ifcr->ifcr_capenable & l4csum;
2535 if (l4csum_en != l4csum && l4csum_en != 0)
2536 return EINVAL;
2537 /*FALLTHROUGH*/
2538 case SIOCADDMULTI:
2539 case SIOCDELMULTI:
2540 case SIOCSIFFLAGS:
2541 case SIOCSIFMTU:
2542 default:
2543 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2544 return error;
2545 if ((ifp->if_flags & IFF_RUNNING) == 0)
2546 ;
2547 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
2548 IXGBE_CORE_LOCK(adapter);
2549 ixv_init_locked(adapter);
2550 IXGBE_CORE_UNLOCK(adapter);
2551 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
2552 /*
2553 * Multicast list has changed; set the hardware filter
2554 * accordingly.
2555 */
2556 IXGBE_CORE_LOCK(adapter);
2557 ixv_disable_intr(adapter);
2558 ixv_set_multi(adapter);
2559 ixv_enable_intr(adapter);
2560 IXGBE_CORE_UNLOCK(adapter);
2561 }
2562 return 0;
2563 }
2564 } /* ixv_ioctl */
2565
2566 /************************************************************************
2567 * ixv_init
2568 ************************************************************************/
2569 static int
2570 ixv_init(struct ifnet *ifp)
2571 {
2572 struct adapter *adapter = ifp->if_softc;
2573
2574 IXGBE_CORE_LOCK(adapter);
2575 ixv_init_locked(adapter);
2576 IXGBE_CORE_UNLOCK(adapter);
2577
2578 return 0;
2579 } /* ixv_init */
2580
2581
2582 /************************************************************************
2583 * ixv_handle_que
2584 ************************************************************************/
2585 static void
2586 ixv_handle_que(void *context)
2587 {
2588 struct ix_queue *que = context;
2589 struct adapter *adapter = que->adapter;
2590 struct tx_ring *txr = que->txr;
2591 struct ifnet *ifp = adapter->ifp;
2592 bool more;
2593
2594 adapter->handleq.ev_count++;
2595
2596 if (ifp->if_flags & IFF_RUNNING) {
2597 more = ixgbe_rxeof(que);
2598 IXGBE_TX_LOCK(txr);
2599 ixgbe_txeof(txr);
2600 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2601 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
2602 ixgbe_mq_start_locked(ifp, txr);
2603 /* Only for queue 0 */
2604 /* NetBSD still needs this for CBQ */
2605 if ((&adapter->queues[0] == que)
2606 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
2607 ixgbe_legacy_start_locked(ifp, txr);
2608 IXGBE_TX_UNLOCK(txr);
2609 if (more) {
2610 adapter->req.ev_count++;
2611 softint_schedule(que->que_si);
2612 return;
2613 }
2614 }
2615
2616 /* Re-enable this interrupt */
2617 ixv_enable_queue(adapter, que->msix);
2618
2619 return;
2620 } /* ixv_handle_que */
2621
2622 /************************************************************************
2623 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2624 ************************************************************************/
2625 static int
2626 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2627 {
2628 device_t dev = adapter->dev;
2629 struct ix_queue *que = adapter->queues;
2630 struct tx_ring *txr = adapter->tx_rings;
2631 int error, msix_ctrl, rid, vector = 0;
2632 pci_chipset_tag_t pc;
2633 pcitag_t tag;
2634 char intrbuf[PCI_INTRSTR_LEN];
2635 char intr_xname[32];
2636 const char *intrstr = NULL;
2637 kcpuset_t *affinity;
2638 int cpu_id = 0;
2639
2640 pc = adapter->osdep.pc;
2641 tag = adapter->osdep.tag;
2642
2643 adapter->osdep.nintrs = adapter->num_queues + 1;
2644 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2645 adapter->osdep.nintrs) != 0) {
2646 aprint_error_dev(dev,
2647 "failed to allocate MSI-X interrupt\n");
2648 return (ENXIO);
2649 }
2650
2651 kcpuset_create(&affinity, false);
2652 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2653 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2654 device_xname(dev), i);
2655 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2656 sizeof(intrbuf));
2657 #ifdef IXGBE_MPSAFE
2658 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2659 true);
2660 #endif
2661 /* Set the handler function */
2662 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2663 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2664 intr_xname);
2665 if (que->res == NULL) {
2666 pci_intr_release(pc, adapter->osdep.intrs,
2667 adapter->osdep.nintrs);
2668 aprint_error_dev(dev,
2669 "Failed to register QUE handler\n");
2670 kcpuset_destroy(affinity);
2671 return (ENXIO);
2672 }
2673 que->msix = vector;
2674 adapter->active_queues |= (u64)(1 << que->msix);
2675
2676 cpu_id = i;
2677 /* Round-robin affinity */
2678 kcpuset_zero(affinity);
2679 kcpuset_set(affinity, cpu_id % ncpu);
2680 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2681 NULL);
2682 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2683 intrstr);
2684 if (error == 0)
2685 aprint_normal(", bound queue %d to cpu %d\n",
2686 i, cpu_id % ncpu);
2687 else
2688 aprint_normal("\n");
2689
2690 #ifndef IXGBE_LEGACY_TX
2691 txr->txr_si
2692 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2693 ixgbe_deferred_mq_start, txr);
2694 #endif
2695 que->que_si
2696 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2697 ixv_handle_que, que);
2698 if (que->que_si == NULL) {
2699 aprint_error_dev(dev,
2700 "could not establish software interrupt\n");
2701 }
2702 }
2703
2704 /* and Mailbox */
2705 cpu_id++;
2706 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2707 adapter->vector = vector;
2708 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2709 sizeof(intrbuf));
2710 #ifdef IXGBE_MPSAFE
2711 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2712 true);
2713 #endif
2714 /* Set the mbx handler function */
2715 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2716 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2717 intr_xname);
2718 if (adapter->osdep.ihs[vector] == NULL) {
2719 adapter->res = NULL;
2720 aprint_error_dev(dev, "Failed to register LINK handler\n");
2721 kcpuset_destroy(affinity);
2722 return (ENXIO);
2723 }
2724 /* Round-robin affinity */
2725 kcpuset_zero(affinity);
2726 kcpuset_set(affinity, cpu_id % ncpu);
2727 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2728
2729 aprint_normal_dev(dev,
2730 "for link, interrupting at %s", intrstr);
2731 if (error == 0)
2732 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2733 else
2734 aprint_normal("\n");
2735
2736 /* Tasklets for Mailbox */
2737 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2738 ixv_handle_link, adapter);
2739 /*
2740 * Due to a broken design QEMU will fail to properly
2741 * enable the guest for MSI-X unless the vectors in
2742 * the table are all set up, so we must rewrite the
2743 * ENABLE in the MSI-X control register again at this
2744 * point to cause it to successfully initialize us.
2745 */
2746 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2747 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
2748 rid += PCI_MSIX_CTL;
2749 msix_ctrl = pci_conf_read(pc, tag, rid);
2750 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
2751 pci_conf_write(pc, tag, rid, msix_ctrl);
2752 }
2753
2754 kcpuset_destroy(affinity);
2755 return (0);
2756 } /* ixv_allocate_msix */
2757
2758 /************************************************************************
2759 * ixv_configure_interrupts - Setup MSI-X resources
2760 *
2761 * Note: The VF device MUST use MSI-X, there is no fallback.
2762 ************************************************************************/
2763 static int
2764 ixv_configure_interrupts(struct adapter *adapter)
2765 {
2766 device_t dev = adapter->dev;
2767 int want, queues, msgs;
2768
2769 /* Must have at least 2 MSI-X vectors */
2770 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2771 if (msgs < 2) {
2772 aprint_error_dev(dev, "MSIX config error\n");
2773 return (ENXIO);
2774 }
2775 msgs = MIN(msgs, IXG_MAX_NINTR);
2776
2777 /* Figure out a reasonable auto config value */
2778 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2779
2780 if (ixv_num_queues != 0)
2781 queues = ixv_num_queues;
2782 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2783 queues = IXGBE_VF_MAX_TX_QUEUES;
2784
2785 /*
2786 * Want vectors for the queues,
2787 * plus an additional for mailbox.
2788 */
2789 want = queues + 1;
2790 if (msgs >= want)
2791 msgs = want;
2792 else {
2793 aprint_error_dev(dev,
2794 "MSI-X Configuration Problem, "
2795 "%d vectors but %d queues wanted!\n",
2796 msgs, want);
2797 return -1;
2798 }
2799
2800 adapter->msix_mem = (void *)1; /* XXX */
2801 aprint_normal_dev(dev,
2802 "Using MSI-X interrupts with %d vectors\n", msgs);
2803 adapter->num_queues = queues;
2804
2805 return (0);
2806 } /* ixv_configure_interrupts */
2807
2808
2809 /************************************************************************
2810 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2811 *
2812 * Done outside of interrupt context since the driver might sleep
2813 ************************************************************************/
2814 static void
2815 ixv_handle_link(void *context)
2816 {
2817 struct adapter *adapter = context;
2818
2819 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2820 &adapter->link_up, FALSE);
2821 ixv_update_link_status(adapter);
2822 } /* ixv_handle_link */
2823
2824 /************************************************************************
2825 * ixv_check_link - Used in the local timer to poll for link changes
2826 ************************************************************************/
2827 static void
2828 ixv_check_link(struct adapter *adapter)
2829 {
2830 adapter->hw.mac.get_link_status = TRUE;
2831
2832 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2833 &adapter->link_up, FALSE);
2834 ixv_update_link_status(adapter);
2835 } /* ixv_check_link */
2836