ixv.c revision 1.73 1 /*$NetBSD: ixv.c,v 1.73 2017/10/23 09:31:18 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
47 /************************************************************************
48 * Driver version
49 ************************************************************************/
50 char ixv_driver_version[] = "1.5.13-k";
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* All VF variants share the Intel vendor ID; subvendor/subdevice
	 * of 0 act as wildcards in ixv_lookup(). */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixv_vendor_info_array entries.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121
122 static void ixv_setup_vlan_support(struct adapter *);
123 #if 0
124 static void ixv_register_vlan(void *, struct ifnet *, u16);
125 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
126 #endif
127
128 static void ixv_add_device_sysctls(struct adapter *);
129 static void ixv_save_stats(struct adapter *);
130 static void ixv_init_stats(struct adapter *);
131 static void ixv_update_stats(struct adapter *);
132 static void ixv_add_stats_sysctls(struct adapter *);
133 static void ixv_set_sysctl_value(struct adapter *, const char *,
134 const char *, int *, int);
135
136 /* The MSI-X Interrupt handlers */
137 static int ixv_msix_que(void *);
138 static int ixv_msix_mbx(void *);
139
140 /* Deferred interrupt tasklets */
141 static void ixv_handle_que(void *);
142 static void ixv_handle_link(void *);
143
144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
146
147 /************************************************************************
148 * FreeBSD Device Interface Entry Points
149 ************************************************************************/
150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
151 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
152 DVF_DETACH_SHUTDOWN);
153
154 #if 0
155 static driver_t ixv_driver = {
156 "ixv", ixv_methods, sizeof(struct adapter),
157 };
158
159 devclass_t ixv_devclass;
160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
163 #endif
164
165 /*
166 * TUNEABLE PARAMETERS:
167 */
168
/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * TUNABLE_INT is a no-op stub: the NetBSD port has no FreeBSD-style
 * loader tunables, so the macro merely records the knob's name next to
 * the backing variable for documentation purposes.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * With NET_MPSAFE the callouts and softints run without the kernel
 * lock; otherwise they take the defaults (kernel-locked).
 * NOTE: "SOFTINFT" is a historical misspelling of SOFTINT kept for
 * compatibility with the rest of the file.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#endif
215
216 #if 0
217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
219 #endif
220
221 /************************************************************************
222 * ixv_probe - Device identification routine
223 *
224 * Determines if the driver should be loaded on
225 * adapter based on its PCI vendor/device ID.
226 *
227 * return BUS_PROBE_DEFAULT on success, positive on failure
228 ************************************************************************/
229 static int
230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
231 {
232 #ifdef __HAVE_PCI_MSI_MSIX
233 const struct pci_attach_args *pa = aux;
234
235 return (ixv_lookup(pa) != NULL) ? 1 : 0;
236 #else
237 return 0;
238 #endif
239 } /* ixv_probe */
240
241 static ixgbe_vendor_info_t *
242 ixv_lookup(const struct pci_attach_args *pa)
243 {
244 ixgbe_vendor_info_t *ent;
245 pcireg_t subid;
246
247 INIT_DEBUGOUT("ixv_lookup: begin");
248
249 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
250 return NULL;
251
252 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
253
254 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
255 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
256 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
257 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
258 (ent->subvendor_id == 0)) &&
259 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
260 (ent->subdevice_id == 0))) {
261 return ent;
262 }
263 }
264
265 return NULL;
266 }
267
268 /************************************************************************
269 * ixv_attach - Device initialization routine
270 *
271 * Called when the driver is being loaded.
272 * Identifies the type of hardware, allocates all resources
273 * and initializes the hardware.
274 *
275 * return 0 on success, positive on failure
276 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;	/* printable mailbox API version */
	const char *str;	/* printable MAC type name */
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform provides one. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Probe already matched this device, so lookup cannot fail. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version; attach continues even if this
	 * fails (the API version then stays at the reset default). */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear multicast bit */
		addr[0] |= 0x02;	/* set locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks: counts must keep the
	 * descriptor ring DBA_ALIGN-aligned and within MIN/MAX bounds. */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the capability/enabled feature bit masks. */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attached last: ixv_detach() bails out early otherwise. */
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
540
541 /************************************************************************
542 * ixv_detach - Device removal routine
543 *
544 * Called when the driver is being removed.
545 * Stops the adapter and deallocates all the resources
546 * that were allocated for driver operation.
547 *
548 * return 0 on success, positive on failure
549 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse a polite detach while VLANs still reference us. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait (NULL: using the callout's own lock) for the timer to finish. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Undo the sysctl tree and every event counter attached during
	 * attach (mirrors ixv_add_stats_sysctls()). */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* txr was advanced by the softint loop above; rewind it. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
672
673 /************************************************************************
674 * ixv_init_locked - Init entry point
675 *
676 * Used in two ways: It is used by the stack as an init entry
677 * point in network interface structure. It is also used
678 * by the driver as a hw/sw initialization routine to get
679 * to a consistent state.
680 *
681 * return 0 on success, positive on failure
682 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* NOTE(review): second set_rar passes literal 1 where the first
	 * used IXGBE_RAH_AV -- presumably equivalent; confirm. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: mailbox vector plus every queue vector. */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
794
795 /*
796 * MSI-X Interrupt Handlers and Tasklets
797 */
798
799 static inline void
800 ixv_enable_queue(struct adapter *adapter, u32 vector)
801 {
802 struct ixgbe_hw *hw = &adapter->hw;
803 u32 queue = 1 << vector;
804 u32 mask;
805
806 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
807 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
808 } /* ixv_enable_queue */
809
810 static inline void
811 ixv_disable_queue(struct adapter *adapter, u32 vector)
812 {
813 struct ixgbe_hw *hw = &adapter->hw;
814 u64 queue = (u64)(1 << vector);
815 u32 mask;
816
817 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
818 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
819 } /* ixv_disable_queue */
820
821 static inline void
822 ixv_rearm_queues(struct adapter *adapter, u64 queues)
823 {
824 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
825 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
826 } /* ixv_rearm_queues */
827
828
829 /************************************************************************
830 * ixv_msix_que - MSI Queue Interrupt Service routine
831 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until the deferred work is done (or we
	 * explicitly re-enable it below). */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 * the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Estimate average packet size in each direction; the larger
	 * average drives the new interrupt-throttle value. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Duplicate the interval into both halves of the register word. */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		/* Defer remaining work; the softint re-enables the vector. */
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
910
911 /************************************************************************
912 * ixv_msix_mbx
913 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change: flag it and defer the actual link check to
	 * the link softint (ixv_handle_link). */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox vector. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
931
932 /************************************************************************
933 * ixv_media_status - Media Ioctl callback
934 *
935 * Called whenever the user queries the status of
936 * the interface using ifconfig.
937 ************************************************************************/
938 static void
939 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
940 {
941 struct adapter *adapter = ifp->if_softc;
942
943 INIT_DEBUGOUT("ixv_media_status: begin");
944 IXGBE_CORE_LOCK(adapter);
945 ixv_update_link_status(adapter);
946
947 ifmr->ifm_status = IFM_AVALID;
948 ifmr->ifm_active = IFM_ETHER;
949
950 if (!adapter->link_active) {
951 ifmr->ifm_active |= IFM_NONE;
952 IXGBE_CORE_UNLOCK(adapter);
953 return;
954 }
955
956 ifmr->ifm_status |= IFM_ACTIVE;
957
958 switch (adapter->link_speed) {
959 case IXGBE_LINK_SPEED_10GB_FULL:
960 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
961 break;
962 case IXGBE_LINK_SPEED_5GB_FULL:
963 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
964 break;
965 case IXGBE_LINK_SPEED_2_5GB_FULL:
966 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
967 break;
968 case IXGBE_LINK_SPEED_1GB_FULL:
969 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
970 break;
971 case IXGBE_LINK_SPEED_100_FULL:
972 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
973 break;
974 case IXGBE_LINK_SPEED_10_FULL:
975 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
976 break;
977 }
978
979 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
980
981 IXGBE_CORE_UNLOCK(adapter);
982
983 return;
984 } /* ixv_media_status */
985
986 /************************************************************************
987 * ixv_media_change - Media Ioctl callback
988 *
989 * Called when the user changes speed/duplex using
990 * media/mediopt option with ifconfig.
991 ************************************************************************/
992 static int
993 ixv_media_change(struct ifnet *ifp)
994 {
995 struct adapter *adapter = ifp->if_softc;
996 struct ifmedia *ifm = &adapter->media;
997
998 INIT_DEBUGOUT("ixv_media_change: begin");
999
1000 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1001 return (EINVAL);
1002
1003 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1004 case IFM_AUTO:
1005 break;
1006 default:
1007 device_printf(adapter->dev, "Only auto media type\n");
1008 return (EINVAL);
1009 }
1010
1011 return (0);
1012 } /* ixv_media_change */
1013
1014
1015 /************************************************************************
1016 * ixv_negotiate_api
1017 *
1018 * Negotiate the Mailbox API with the PF;
1019 * start with the most featured API first.
1020 ************************************************************************/
1021 static int
1022 ixv_negotiate_api(struct adapter *adapter)
1023 {
1024 struct ixgbe_hw *hw = &adapter->hw;
1025 int mbx_api[] = { ixgbe_mbox_api_11,
1026 ixgbe_mbox_api_10,
1027 ixgbe_mbox_api_unknown };
1028 int i = 0;
1029
1030 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1031 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1032 return (0);
1033 i++;
1034 }
1035
1036 return (EINVAL);
1037 } /* ixv_negotiate_api */
1038
1039
1040 /************************************************************************
1041 * ixv_set_multi - Multicast Update
1042 *
1043 * Called whenever multicast address list is updated.
1044 ************************************************************************/
1045 static void
1046 ixv_set_multi(struct adapter *adapter)
1047 {
1048 struct ether_multi *enm;
1049 struct ether_multistep step;
1050 struct ethercom *ec = &adapter->osdep.ec;
1051 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1052 u8 *update_ptr;
1053 int mcnt = 0;
1054
1055 KASSERT(mutex_owned(&adapter->core_mtx));
1056 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1057
1058 ETHER_LOCK(ec);
1059 ETHER_FIRST_MULTI(step, ec, enm);
1060 while (enm != NULL) {
1061 bcopy(enm->enm_addrlo,
1062 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1063 IXGBE_ETH_LENGTH_OF_ADDRESS);
1064 mcnt++;
1065 /* XXX This might be required --msaitoh */
1066 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1067 break;
1068 ETHER_NEXT_MULTI(step, enm);
1069 }
1070 ETHER_UNLOCK(ec);
1071
1072 update_ptr = mta;
1073
1074 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1075 ixv_mc_array_itr, TRUE);
1076
1077 return;
1078 } /* ixv_set_multi */
1079
1080 /************************************************************************
1081 * ixv_mc_array_itr
1082 *
1083 * An iterator function needed by the multicast shared code.
1084 * It feeds the shared code routine the addresses in the
1085 * array of ixv_set_multi() one by one.
1086 ************************************************************************/
1087 static u8 *
1088 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1089 {
1090 u8 *addr = *update_ptr;
1091 u8 *newptr;
1092 *vmdq = 0;
1093
1094 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1095 *update_ptr = newptr;
1096
1097 return addr;
1098 } /* ixv_mc_array_itr */
1099
1100 /************************************************************************
1101 * ixv_local_timer - Timer routine
1102 *
1103 * Checks for link status, updates statistics,
1104 * and runs the watchdog check.
1105 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Callout entry point: take the core lock and run the body. */
	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1115
/*
 * Periodic (1 Hz) timer body; core lock must be held.  Refreshes link
 * state and statistics, then scans the TX queues for hung rings and
 * fires the watchdog only when every queue appears hung.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;	/* bitmask of queues with pending TX work */
	int hung = 0;	/* number of queues currently marked hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log, count the event, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1182
1183 /************************************************************************
1184 * ixv_update_link_status - Update OS on link state
1185 *
1186 * Note: Only updates the OS on the cached link state.
1187 * The real check of the hardware only happens with
1188 * a link interrupt.
1189 ************************************************************************/
1190 static void
1191 ixv_update_link_status(struct adapter *adapter)
1192 {
1193 struct ifnet *ifp = adapter->ifp;
1194 device_t dev = adapter->dev;
1195
1196 if (adapter->link_up) {
1197 if (adapter->link_active == FALSE) {
1198 if (bootverbose) {
1199 const char *bpsmsg;
1200
1201 switch (adapter->link_speed) {
1202 case IXGBE_LINK_SPEED_10GB_FULL:
1203 bpsmsg = "10 Gbps";
1204 break;
1205 case IXGBE_LINK_SPEED_5GB_FULL:
1206 bpsmsg = "5 Gbps";
1207 break;
1208 case IXGBE_LINK_SPEED_2_5GB_FULL:
1209 bpsmsg = "2.5 Gbps";
1210 break;
1211 case IXGBE_LINK_SPEED_1GB_FULL:
1212 bpsmsg = "1 Gbps";
1213 break;
1214 case IXGBE_LINK_SPEED_100_FULL:
1215 bpsmsg = "100 Mbps";
1216 break;
1217 case IXGBE_LINK_SPEED_10_FULL:
1218 bpsmsg = "10 Mbps";
1219 break;
1220 default:
1221 bpsmsg = "unknown speed";
1222 break;
1223 }
1224 device_printf(dev, "Link is up %s %s \n",
1225 bpsmsg, "Full Duplex");
1226 }
1227 adapter->link_active = TRUE;
1228 if_link_state_change(ifp, LINK_STATE_UP);
1229 }
1230 } else { /* Link down */
1231 if (adapter->link_active == TRUE) {
1232 if (bootverbose)
1233 device_printf(dev, "Link is Down\n");
1234 if_link_state_change(ifp, LINK_STATE_DOWN);
1235 adapter->link_active = FALSE;
1236 }
1237 }
1238
1239 return;
1240 } /* ixv_update_link_status */
1241
1242
1243 /************************************************************************
1244 * ixv_stop - Stop the hardware
1245 *
1246 * Disables all traffic on the adapter by issuing a
1247 * global reset on the MAC and deallocates TX/RX buffers.
1248 ************************************************************************/
1249 static void
1250 ixv_ifstop(struct ifnet *ifp, int disable)
1251 {
1252 struct adapter *adapter = ifp->if_softc;
1253
1254 IXGBE_CORE_LOCK(adapter);
1255 ixv_stop(adapter);
1256 IXGBE_CORE_UNLOCK(adapter);
1257 }
1258
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared before stop_adapter,
	 * presumably so the shared-code stop routine does its full work
	 * instead of returning early — confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the periodic link/watchdog callout. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1286
1287
1288 /************************************************************************
1289 * ixv_allocate_pci_resources
1290 ************************************************************************/
/*
 * Map BAR0 (the device register space) and pick up the tunable queue
 * count.  Returns 0 on success, ENXIO when the BAR is missing or
 * cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	/* Only plain 32-bit or 64-bit memory BARs are accepted. */
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing to unmap" on free. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1331
1332 /************************************************************************
1333 * ixv_free_pci_resources
1334 ************************************************************************/
/*
 * Tear down everything ixv_allocate_pci_resources() and the interrupt
 * setup established: queue interrupts first, then the mailbox
 * interrupt, the MSI-X vectors, and finally the BAR0 mapping.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Release the allocated interrupt vectors themselves. */
	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* Unmap BAR0 only if it was successfully mapped (mem_size != 0). */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1371
1372 /************************************************************************
1373 * ixv_setup_interface
1374 *
1375 * Setup networking device structure and register an interface.
1376 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up its callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_START_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* Leave two descriptors of headroom in the send queue. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	/* Interface capabilities start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	/* The VF can only advertise autoselect. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1463
1464
1465 /************************************************************************
1466 * ixv_initialize_transmit_units - Enable transmit unit.
1467 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Program each TX ring's registers from its software state. */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1509
1510
1511 /************************************************************************
1512 * ixv_initialize_rss_mapping
1513 ************************************************************************/
/*
 * Program the VF RSS key, redirection table (RETA), and hash-type
 * selection (VFMRQC).  When the RSS feature is enabled the kernel's
 * configured key/indirection/hash-config are used; otherwise a random
 * key and a round-robin indirection over the queues are installed.
 */
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Flush each group of four 8-bit entries to a VFRETA reg. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash config into VFMRQC field bits; warn for
	 * hash types the hardware does not support. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1605
1606
1607 /************************************************************************
1608 * ixv_initialize_receive_units - Setup receive registers and features.
1609 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size (in SRRCTL units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	/*
	 * NOTE(review): bit 29 is presumably the multi-queue/RSS enable
	 * in VFPSRTYPE — confirm against the VF datasheet.
	 */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) until the disable takes effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1738
1739 /************************************************************************
1740 * ixv_setup_vlan_support
1741 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* Turn on hardware VLAN tag stripping for this ring. */
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Retry up to 5 times on mailbox failure. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1803
1804 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1805 /************************************************************************
1806 * ixv_register_vlan
1807 *
1808 * Run via a vlan config EVENT, it enables us to use the
1809 * HW Filter table since we can get the vlan id. This just
1810 * creates the entry in the soft version of the VFTA, init
1811 * will repopulate the real table.
1812 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Set the bit for this VID in the 32-bit-word shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1833
1834 /************************************************************************
1835 * ixv_unregister_vlan
1836 *
1837 * Run via a vlan unconfig EVENT, remove our entry
1838 * in the soft vfta.
1839 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the bit for this VID in the 32-bit-word shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
1860 #endif
1861
1862 /************************************************************************
1863 * ixv_enable_intr
1864 ************************************************************************/
1865 static void
1866 ixv_enable_intr(struct adapter *adapter)
1867 {
1868 struct ixgbe_hw *hw = &adapter->hw;
1869 struct ix_queue *que = adapter->queues;
1870 u32 mask;
1871 int i;
1872
1873 /* For VTEIAC */
1874 mask = (1 << adapter->vector);
1875 for (i = 0; i < adapter->num_queues; i++, que++)
1876 mask |= (1 << que->msix);
1877 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1878
1879 /* For VTEIMS */
1880 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
1881 que = adapter->queues;
1882 for (i = 0; i < adapter->num_queues; i++, que++)
1883 ixv_enable_queue(adapter, que->msix);
1884
1885 IXGBE_WRITE_FLUSH(hw);
1886
1887 return;
1888 } /* ixv_enable_intr */
1889
1890 /************************************************************************
1891 * ixv_disable_intr
1892 ************************************************************************/
1893 static void
1894 ixv_disable_intr(struct adapter *adapter)
1895 {
1896 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1897 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1898 IXGBE_WRITE_FLUSH(&adapter->hw);
1899
1900 return;
1901 } /* ixv_disable_intr */
1902
1903 /************************************************************************
1904 * ixv_set_ivar
1905 *
1906 * Setup the correct IVAR register for a particular MSI-X interrupt
1907 * - entry is the register array entry
1908 * - vector is the MSI-X vector for this queue
1909 * - type is RX/TX/MISC
1910 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the vector entry as valid/allocated. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* Mailbox/other cause goes in the low byte of VTIVAR_MISC. */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/*
		 * Each VTIVAR register packs four 8-bit entries: two
		 * queues (entry & 1) times two types (RX=0, TX=1);
		 * read-modify-write only the byte we own.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
1932
1933 /************************************************************************
1934 * ixv_configure_ivars
1935 ************************************************************************/
1936 static void
1937 ixv_configure_ivars(struct adapter *adapter)
1938 {
1939 struct ix_queue *que = adapter->queues;
1940
1941 for (int i = 0; i < adapter->num_queues; i++, que++) {
1942 /* First the RX queue entry */
1943 ixv_set_ivar(adapter, i, que->msix, 0);
1944 /* ... and the TX */
1945 ixv_set_ivar(adapter, i, que->msix, 1);
1946 /* Set an initial value in EITR */
1947 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1948 IXGBE_EITR_DEFAULT);
1949 }
1950
1951 /* For the mailbox interrupt */
1952 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1953 } /* ixv_configure_ivars */
1954
1955
1956 /************************************************************************
1957 * ixv_save_stats
1958 *
1959 * The VF stats registers never have a truly virgin
1960 * starting point, so this routine tries to make an
1961 * artificial one, marking ground zero on attach as
1962 * it were.
1963 ************************************************************************/
1964 static void
1965 ixv_save_stats(struct adapter *adapter)
1966 {
1967 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1968
1969 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1970 stats->saved_reset_vfgprc +=
1971 stats->vfgprc.ev_count - stats->base_vfgprc;
1972 stats->saved_reset_vfgptc +=
1973 stats->vfgptc.ev_count - stats->base_vfgptc;
1974 stats->saved_reset_vfgorc +=
1975 stats->vfgorc.ev_count - stats->base_vfgorc;
1976 stats->saved_reset_vfgotc +=
1977 stats->vfgotc.ev_count - stats->base_vfgotc;
1978 stats->saved_reset_vfmprc +=
1979 stats->vfmprc.ev_count - stats->base_vfmprc;
1980 }
1981 } /* ixv_save_stats */
1982
1983 /************************************************************************
1984 * ixv_init_stats
1985 ************************************************************************/
1986 static void
1987 ixv_init_stats(struct adapter *adapter)
1988 {
1989 struct ixgbe_hw *hw = &adapter->hw;
1990
1991 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1992 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1993 adapter->stats.vf.last_vfgorc |=
1994 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1995
1996 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1997 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1998 adapter->stats.vf.last_vfgotc |=
1999 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2000
2001 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2002
2003 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2004 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2005 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2006 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2007 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2008 } /* ixv_init_stats */
2009
/*
 * Fold a 32-bit hardware counter into a 64-bit evcnt.  A decrease since
 * the last read means the register wrapped, so carry 2^32 into the upper
 * half; the low 32 bits always mirror the register.  Expects a local
 * 'hw' (struct ixgbe_hw *) in scope at the expansion site.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * (safe in if/else bodies and always consumes the trailing semicolon).
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2019
/*
 * Same as UPDATE_STAT_32 but for the 36-bit octet counters, which are
 * read as a 32-bit LSB register plus a 4-bit MSB register.  Wrap carry
 * is therefore 2^36 and the preserved upper mask starts at bit 36.
 *
 * Wrapped in do { } while (0) for single-statement macro semantics.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2031
2032 /************************************************************************
2033 * ixv_update_stats - Update the board statistics counters.
2034 ************************************************************************/
2035 void
2036 ixv_update_stats(struct adapter *adapter)
2037 {
2038 struct ixgbe_hw *hw = &adapter->hw;
2039 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2040
2041 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2042 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2043 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2044 stats->vfgorc);
2045 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2046 stats->vfgotc);
2047 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2048
2049 /* Fill out the OS statistics structure */
2050 /*
2051 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
2052 * adapter->stats counters. It's required to make ifconfig -z
2053 * (SOICZIFDATA) work.
2054 */
2055 } /* ixv_update_stats */
2056
2057 const struct sysctlnode *
2058 ixv_sysctl_instance(struct adapter *adapter)
2059 {
2060 const char *dvname;
2061 struct sysctllog **log;
2062 int rc;
2063 const struct sysctlnode *rnode;
2064
2065 log = &adapter->sysctllog;
2066 dvname = device_xname(adapter->dev);
2067
2068 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2069 0, CTLTYPE_NODE, dvname,
2070 SYSCTL_DESCR("ixv information and settings"),
2071 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2072 goto err;
2073
2074 return rnode;
2075 err:
2076 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2077 return NULL;
2078 }
2079
2080 static void
2081 ixv_add_device_sysctls(struct adapter *adapter)
2082 {
2083 struct sysctllog **log;
2084 const struct sysctlnode *rnode, *cnode;
2085 device_t dev;
2086
2087 dev = adapter->dev;
2088 log = &adapter->sysctllog;
2089
2090 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2091 aprint_error_dev(dev, "could not create sysctl root\n");
2092 return;
2093 }
2094
2095 if (sysctl_createv(log, 0, &rnode, &cnode,
2096 CTLFLAG_READWRITE, CTLTYPE_INT,
2097 "debug", SYSCTL_DESCR("Debug Info"),
2098 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2099 aprint_error_dev(dev, "could not create sysctl\n");
2100
2101 if (sysctl_createv(log, 0, &rnode, &cnode,
2102 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2103 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2104 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2105 aprint_error_dev(dev, "could not create sysctl\n");
2106 }
2107
2108 /************************************************************************
2109 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2110 ************************************************************************/
2111 static void
2112 ixv_add_stats_sysctls(struct adapter *adapter)
2113 {
2114 device_t dev = adapter->dev;
2115 struct tx_ring *txr = adapter->tx_rings;
2116 struct rx_ring *rxr = adapter->rx_rings;
2117 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2118 struct ixgbe_hw *hw = &adapter->hw;
2119 const struct sysctlnode *rnode;
2120 struct sysctllog **log = &adapter->sysctllog;
2121 const char *xname = device_xname(dev);
2122
2123 /* Driver Statistics */
2124 evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
2125 NULL, xname, "Handled queue in softint");
2126 evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
2127 NULL, xname, "Requeued in softint");
2128 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2129 NULL, xname, "Driver tx dma soft fail EFBIG");
2130 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2131 NULL, xname, "m_defrag() failed");
2132 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2133 NULL, xname, "Driver tx dma hard fail EFBIG");
2134 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2135 NULL, xname, "Driver tx dma hard fail EINVAL");
2136 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2137 NULL, xname, "Driver tx dma hard fail other");
2138 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2139 NULL, xname, "Driver tx dma soft fail EAGAIN");
2140 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2141 NULL, xname, "Driver tx dma soft fail ENOMEM");
2142 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2143 NULL, xname, "Watchdog timeouts");
2144 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2145 NULL, xname, "TSO errors");
2146 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
2147 NULL, xname, "Link MSI-X IRQ Handled");
2148
2149 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2150 snprintf(adapter->queues[i].evnamebuf,
2151 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2152 xname, i);
2153 snprintf(adapter->queues[i].namebuf,
2154 sizeof(adapter->queues[i].namebuf), "q%d", i);
2155
2156 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2157 aprint_error_dev(dev, "could not create sysctl root\n");
2158 break;
2159 }
2160
2161 if (sysctl_createv(log, 0, &rnode, &rnode,
2162 0, CTLTYPE_NODE,
2163 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2164 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2165 break;
2166
2167 #if 0 /* not yet */
2168 if (sysctl_createv(log, 0, &rnode, &cnode,
2169 CTLFLAG_READWRITE, CTLTYPE_INT,
2170 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2171 ixgbe_sysctl_interrupt_rate_handler, 0,
2172 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2173 break;
2174
2175 if (sysctl_createv(log, 0, &rnode, &cnode,
2176 CTLFLAG_READONLY, CTLTYPE_QUAD,
2177 "irqs", SYSCTL_DESCR("irqs on this queue"),
2178 NULL, 0, &(adapter->queues[i].irqs),
2179 0, CTL_CREATE, CTL_EOL) != 0)
2180 break;
2181
2182 if (sysctl_createv(log, 0, &rnode, &cnode,
2183 CTLFLAG_READONLY, CTLTYPE_INT,
2184 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2185 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
2186 0, CTL_CREATE, CTL_EOL) != 0)
2187 break;
2188
2189 if (sysctl_createv(log, 0, &rnode, &cnode,
2190 CTLFLAG_READONLY, CTLTYPE_INT,
2191 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2192 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
2193 0, CTL_CREATE, CTL_EOL) != 0)
2194 break;
2195 #endif
2196 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2197 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2198 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2199 NULL, adapter->queues[i].evnamebuf, "TSO");
2200 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2201 NULL, adapter->queues[i].evnamebuf,
2202 "Queue No Descriptor Available");
2203 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2204 NULL, adapter->queues[i].evnamebuf,
2205 "Queue Packets Transmitted");
2206 #ifndef IXGBE_LEGACY_TX
2207 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2208 NULL, adapter->queues[i].evnamebuf,
2209 "Packets dropped in pcq");
2210 #endif
2211
2212 #ifdef LRO
2213 struct lro_ctrl *lro = &rxr->lro;
2214 #endif /* LRO */
2215
2216 #if 0 /* not yet */
2217 if (sysctl_createv(log, 0, &rnode, &cnode,
2218 CTLFLAG_READONLY,
2219 CTLTYPE_INT,
2220 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
2221 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
2222 CTL_CREATE, CTL_EOL) != 0)
2223 break;
2224
2225 if (sysctl_createv(log, 0, &rnode, &cnode,
2226 CTLFLAG_READONLY,
2227 CTLTYPE_INT,
2228 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
2229 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
2230 CTL_CREATE, CTL_EOL) != 0)
2231 break;
2232 #endif
2233
2234 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2235 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
2236 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2237 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
2238 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2239 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2240 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2241 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2242 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2243 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2244 #ifdef LRO
2245 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2246 CTLFLAG_RD, &lro->lro_queued, 0,
2247 "LRO Queued");
2248 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2249 CTLFLAG_RD, &lro->lro_flushed, 0,
2250 "LRO Flushed");
2251 #endif /* LRO */
2252 }
2253
2254 /* MAC stats get their own sub node */
2255
2256 snprintf(stats->namebuf,
2257 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2258
2259 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2260 stats->namebuf, "rx csum offload - IP");
2261 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2262 stats->namebuf, "rx csum offload - L4");
2263 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2264 stats->namebuf, "rx csum offload - IP bad");
2265 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2266 stats->namebuf, "rx csum offload - L4 bad");
2267
2268 /* Packet Reception Stats */
2269 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2270 xname, "Good Packets Received");
2271 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2272 xname, "Good Octets Received");
2273 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2274 xname, "Multicast Packets Received");
2275 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2276 xname, "Good Packets Transmitted");
2277 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2278 xname, "Good Octets Transmitted");
2279
2280 /* Mailbox Stats */
2281 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2282 xname, "message TXs");
2283 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2284 xname, "message RXs");
2285 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2286 xname, "ACKs");
2287 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2288 xname, "REQs");
2289 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2290 xname, "RSTs");
2291
2292 } /* ixv_add_stats_sysctls */
2293
2294 /************************************************************************
2295 * ixv_set_sysctl_value
2296 ************************************************************************/
2297 static void
2298 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2299 const char *description, int *limit, int value)
2300 {
2301 device_t dev = adapter->dev;
2302 struct sysctllog **log;
2303 const struct sysctlnode *rnode, *cnode;
2304
2305 log = &adapter->sysctllog;
2306 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2307 aprint_error_dev(dev, "could not create sysctl root\n");
2308 return;
2309 }
2310 if (sysctl_createv(log, 0, &rnode, &cnode,
2311 CTLFLAG_READWRITE, CTLTYPE_INT,
2312 name, SYSCTL_DESCR(description),
2313 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2314 aprint_error_dev(dev, "could not create sysctl\n");
2315 *limit = value;
2316 } /* ixv_set_sysctl_value */
2317
2318 /************************************************************************
2319 * ixv_print_debug_info
2320 *
2321 * Called only when em_display_debug_stats is enabled.
2322 * Provides a way to take a look at important statistics
2323 * maintained by the driver and hardware.
2324 ************************************************************************/
2325 static void
2326 ixv_print_debug_info(struct adapter *adapter)
2327 {
2328 device_t dev = adapter->dev;
2329 struct ixgbe_hw *hw = &adapter->hw;
2330 struct ix_queue *que = adapter->queues;
2331 struct rx_ring *rxr;
2332 struct tx_ring *txr;
2333 #ifdef LRO
2334 struct lro_ctrl *lro;
2335 #endif /* LRO */
2336
2337 device_printf(dev, "Error Byte Count = %u \n",
2338 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2339
2340 for (int i = 0; i < adapter->num_queues; i++, que++) {
2341 txr = que->txr;
2342 rxr = que->rxr;
2343 #ifdef LRO
2344 lro = &rxr->lro;
2345 #endif /* LRO */
2346 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2347 que->msix, (long)que->irqs.ev_count);
2348 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2349 rxr->me, (long long)rxr->rx_packets.ev_count);
2350 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2351 rxr->me, (long)rxr->rx_bytes.ev_count);
2352 #ifdef LRO
2353 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2354 rxr->me, (long long)lro->lro_queued);
2355 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2356 rxr->me, (long long)lro->lro_flushed);
2357 #endif /* LRO */
2358 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2359 txr->me, (long)txr->total_packets.ev_count);
2360 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2361 txr->me, (long)txr->no_desc_avail.ev_count);
2362 }
2363
2364 device_printf(dev, "MBX IRQ Handled: %lu\n",
2365 (long)adapter->link_irq.ev_count);
2366 } /* ixv_print_debug_info */
2367
2368 /************************************************************************
2369 * ixv_sysctl_debug
2370 ************************************************************************/
2371 static int
2372 ixv_sysctl_debug(SYSCTLFN_ARGS)
2373 {
2374 struct sysctlnode node;
2375 struct adapter *adapter;
2376 int error, result;
2377
2378 node = *rnode;
2379 node.sysctl_data = &result;
2380 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2381
2382 if (error || newp == NULL)
2383 return error;
2384
2385 if (result == 1) {
2386 adapter = (struct adapter *)node.sysctl_data;
2387 ixv_print_debug_info(adapter);
2388 }
2389
2390 return 0;
2391 } /* ixv_sysctl_debug */
2392
2393 /************************************************************************
2394 * ixv_init_device_features
2395 ************************************************************************/
2396 static void
2397 ixv_init_device_features(struct adapter *adapter)
2398 {
2399 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2400 | IXGBE_FEATURE_VF
2401 | IXGBE_FEATURE_RSS
2402 | IXGBE_FEATURE_LEGACY_TX;
2403
2404 /* A tad short on feature flags for VFs, atm. */
2405 switch (adapter->hw.mac.type) {
2406 case ixgbe_mac_82599_vf:
2407 break;
2408 case ixgbe_mac_X540_vf:
2409 break;
2410 case ixgbe_mac_X550_vf:
2411 case ixgbe_mac_X550EM_x_vf:
2412 case ixgbe_mac_X550EM_a_vf:
2413 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2414 break;
2415 default:
2416 break;
2417 }
2418
2419 /* Enabled by default... */
2420 /* Is a virtual function (VF) */
2421 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2422 adapter->feat_en |= IXGBE_FEATURE_VF;
2423 /* Netmap */
2424 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2425 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2426 /* Receive-Side Scaling (RSS) */
2427 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2428 adapter->feat_en |= IXGBE_FEATURE_RSS;
2429 /* Needs advanced context descriptor regardless of offloads req'd */
2430 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2431 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2432
2433 /* Enabled via sysctl... */
2434 /* Legacy (single queue) transmit */
2435 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2436 ixv_enable_legacy_tx)
2437 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2438 } /* ixv_init_device_features */
2439
2440 /************************************************************************
2441 * ixv_shutdown - Shutdown entry point
2442 ************************************************************************/
2443 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2444 static int
2445 ixv_shutdown(device_t dev)
2446 {
2447 struct adapter *adapter = device_private(dev);
2448 IXGBE_CORE_LOCK(adapter);
2449 ixv_stop(adapter);
2450 IXGBE_CORE_UNLOCK(adapter);
2451
2452 return (0);
2453 } /* ixv_shutdown */
2454 #endif
2455
2456 static int
2457 ixv_ifflags_cb(struct ethercom *ec)
2458 {
2459 struct ifnet *ifp = &ec->ec_if;
2460 struct adapter *adapter = ifp->if_softc;
2461 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2462
2463 IXGBE_CORE_LOCK(adapter);
2464
2465 if (change != 0)
2466 adapter->if_flags = ifp->if_flags;
2467
2468 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2469 rc = ENETRESET;
2470
2471 /* Set up VLAN support and filter */
2472 ixv_setup_vlan_support(adapter);
2473
2474 IXGBE_CORE_UNLOCK(adapter);
2475
2476 return rc;
2477 }
2478
2479
2480 /************************************************************************
2481 * ixv_ioctl - Ioctl entry point
2482 *
2483 * Called when the user wants to configure the interface.
2484 *
2485 * return 0 on success, positive on failure
2486 ************************************************************************/
2487 static int
2488 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2489 {
2490 struct adapter *adapter = ifp->if_softc;
2491 struct ifcapreq *ifcr = data;
2492 struct ifreq *ifr = data;
2493 int error = 0;
2494 int l4csum_en;
2495 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
2496 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
2497
2498 switch (command) {
2499 case SIOCSIFFLAGS:
2500 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2501 break;
2502 case SIOCADDMULTI:
2503 case SIOCDELMULTI:
2504 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2505 break;
2506 case SIOCSIFMEDIA:
2507 case SIOCGIFMEDIA:
2508 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2509 break;
2510 case SIOCSIFCAP:
2511 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2512 break;
2513 case SIOCSIFMTU:
2514 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2515 break;
2516 default:
2517 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
2518 break;
2519 }
2520
2521 switch (command) {
2522 case SIOCSIFMEDIA:
2523 case SIOCGIFMEDIA:
2524 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2525 case SIOCSIFCAP:
2526 /* Layer-4 Rx checksum offload has to be turned on and
2527 * off as a unit.
2528 */
2529 l4csum_en = ifcr->ifcr_capenable & l4csum;
2530 if (l4csum_en != l4csum && l4csum_en != 0)
2531 return EINVAL;
2532 /*FALLTHROUGH*/
2533 case SIOCADDMULTI:
2534 case SIOCDELMULTI:
2535 case SIOCSIFFLAGS:
2536 case SIOCSIFMTU:
2537 default:
2538 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2539 return error;
2540 if ((ifp->if_flags & IFF_RUNNING) == 0)
2541 ;
2542 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
2543 IXGBE_CORE_LOCK(adapter);
2544 ixv_init_locked(adapter);
2545 IXGBE_CORE_UNLOCK(adapter);
2546 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
2547 /*
2548 * Multicast list has changed; set the hardware filter
2549 * accordingly.
2550 */
2551 IXGBE_CORE_LOCK(adapter);
2552 ixv_disable_intr(adapter);
2553 ixv_set_multi(adapter);
2554 ixv_enable_intr(adapter);
2555 IXGBE_CORE_UNLOCK(adapter);
2556 }
2557 return 0;
2558 }
2559 } /* ixv_ioctl */
2560
2561 /************************************************************************
2562 * ixv_init
2563 ************************************************************************/
2564 static int
2565 ixv_init(struct ifnet *ifp)
2566 {
2567 struct adapter *adapter = ifp->if_softc;
2568
2569 IXGBE_CORE_LOCK(adapter);
2570 ixv_init_locked(adapter);
2571 IXGBE_CORE_UNLOCK(adapter);
2572
2573 return 0;
2574 } /* ixv_init */
2575
2576
2577 /************************************************************************
2578 * ixv_handle_que
2579 ************************************************************************/
2580 static void
2581 ixv_handle_que(void *context)
2582 {
2583 struct ix_queue *que = context;
2584 struct adapter *adapter = que->adapter;
2585 struct tx_ring *txr = que->txr;
2586 struct ifnet *ifp = adapter->ifp;
2587 bool more;
2588
2589 adapter->handleq.ev_count++;
2590
2591 if (ifp->if_flags & IFF_RUNNING) {
2592 more = ixgbe_rxeof(que);
2593 IXGBE_TX_LOCK(txr);
2594 ixgbe_txeof(txr);
2595 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2596 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
2597 ixgbe_mq_start_locked(ifp, txr);
2598 /* Only for queue 0 */
2599 /* NetBSD still needs this for CBQ */
2600 if ((&adapter->queues[0] == que)
2601 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
2602 ixgbe_legacy_start_locked(ifp, txr);
2603 IXGBE_TX_UNLOCK(txr);
2604 if (more) {
2605 adapter->req.ev_count++;
2606 softint_schedule(que->que_si);
2607 return;
2608 }
2609 }
2610
2611 /* Re-enable this interrupt */
2612 ixv_enable_queue(adapter, que->msix);
2613
2614 return;
2615 } /* ixv_handle_que */
2616
2617 /************************************************************************
2618 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2619 ************************************************************************/
2620 static int
2621 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2622 {
2623 device_t dev = adapter->dev;
2624 struct ix_queue *que = adapter->queues;
2625 struct tx_ring *txr = adapter->tx_rings;
2626 int error, msix_ctrl, rid, vector = 0;
2627 pci_chipset_tag_t pc;
2628 pcitag_t tag;
2629 char intrbuf[PCI_INTRSTR_LEN];
2630 char intr_xname[32];
2631 const char *intrstr = NULL;
2632 kcpuset_t *affinity;
2633 int cpu_id = 0;
2634
2635 pc = adapter->osdep.pc;
2636 tag = adapter->osdep.tag;
2637
2638 adapter->osdep.nintrs = adapter->num_queues + 1;
2639 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2640 adapter->osdep.nintrs) != 0) {
2641 aprint_error_dev(dev,
2642 "failed to allocate MSI-X interrupt\n");
2643 return (ENXIO);
2644 }
2645
2646 kcpuset_create(&affinity, false);
2647 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2648 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2649 device_xname(dev), i);
2650 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2651 sizeof(intrbuf));
2652 #ifdef IXGBE_MPSAFE
2653 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2654 true);
2655 #endif
2656 /* Set the handler function */
2657 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2658 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2659 intr_xname);
2660 if (que->res == NULL) {
2661 pci_intr_release(pc, adapter->osdep.intrs,
2662 adapter->osdep.nintrs);
2663 aprint_error_dev(dev,
2664 "Failed to register QUE handler\n");
2665 kcpuset_destroy(affinity);
2666 return (ENXIO);
2667 }
2668 que->msix = vector;
2669 adapter->active_queues |= (u64)(1 << que->msix);
2670
2671 cpu_id = i;
2672 /* Round-robin affinity */
2673 kcpuset_zero(affinity);
2674 kcpuset_set(affinity, cpu_id % ncpu);
2675 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2676 NULL);
2677 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2678 intrstr);
2679 if (error == 0)
2680 aprint_normal(", bound queue %d to cpu %d\n",
2681 i, cpu_id % ncpu);
2682 else
2683 aprint_normal("\n");
2684
2685 #ifndef IXGBE_LEGACY_TX
2686 txr->txr_si
2687 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2688 ixgbe_deferred_mq_start, txr);
2689 #endif
2690 que->que_si
2691 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2692 ixv_handle_que, que);
2693 if (que->que_si == NULL) {
2694 aprint_error_dev(dev,
2695 "could not establish software interrupt\n");
2696 }
2697 }
2698
2699 /* and Mailbox */
2700 cpu_id++;
2701 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2702 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2703 sizeof(intrbuf));
2704 #ifdef IXGBE_MPSAFE
2705 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2706 true);
2707 #endif
2708 /* Set the mbx handler function */
2709 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2710 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2711 intr_xname);
2712 if (adapter->osdep.ihs[vector] == NULL) {
2713 adapter->res = NULL;
2714 aprint_error_dev(dev, "Failed to register LINK handler\n");
2715 kcpuset_destroy(affinity);
2716 return (ENXIO);
2717 }
2718 /* Round-robin affinity */
2719 kcpuset_zero(affinity);
2720 kcpuset_set(affinity, cpu_id % ncpu);
2721 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2722
2723 aprint_normal_dev(dev,
2724 "for link, interrupting at %s", intrstr);
2725 if (error == 0)
2726 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2727 else
2728 aprint_normal("\n");
2729
2730 adapter->vector = vector;
2731 /* Tasklets for Mailbox */
2732 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2733 ixv_handle_link, adapter);
2734 /*
2735 * Due to a broken design QEMU will fail to properly
2736 * enable the guest for MSI-X unless the vectors in
2737 * the table are all set up, so we must rewrite the
2738 * ENABLE in the MSI-X control register again at this
2739 * point to cause it to successfully initialize us.
2740 */
2741 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2742 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
2743 rid += PCI_MSIX_CTL;
2744 msix_ctrl = pci_conf_read(pc, tag, rid);
2745 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
2746 pci_conf_write(pc, tag, rid, msix_ctrl);
2747 }
2748
2749 kcpuset_destroy(affinity);
2750 return (0);
2751 } /* ixv_allocate_msix */
2752
2753 /************************************************************************
2754 * ixv_configure_interrupts - Setup MSI-X resources
2755 *
2756 * Note: The VF device MUST use MSI-X, there is no fallback.
2757 ************************************************************************/
2758 static int
2759 ixv_configure_interrupts(struct adapter *adapter)
2760 {
2761 device_t dev = adapter->dev;
2762 int want, queues, msgs;
2763
2764 /* Must have at least 2 MSI-X vectors */
2765 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2766 if (msgs < 2) {
2767 aprint_error_dev(dev, "MSIX config error\n");
2768 return (ENXIO);
2769 }
2770 msgs = MIN(msgs, IXG_MAX_NINTR);
2771
2772 /* Figure out a reasonable auto config value */
2773 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2774
2775 if (ixv_num_queues != 0)
2776 queues = ixv_num_queues;
2777 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2778 queues = IXGBE_VF_MAX_TX_QUEUES;
2779
2780 /*
2781 * Want vectors for the queues,
2782 * plus an additional for mailbox.
2783 */
2784 want = queues + 1;
2785 if (msgs >= want)
2786 msgs = want;
2787 else {
2788 aprint_error_dev(dev,
2789 "MSI-X Configuration Problem, "
2790 "%d vectors but %d queues wanted!\n",
2791 msgs, want);
2792 return -1;
2793 }
2794
2795 adapter->msix_mem = (void *)1; /* XXX */
2796 aprint_normal_dev(dev,
2797 "Using MSI-X interrupts with %d vectors\n", msgs);
2798 adapter->num_queues = queues;
2799
2800 return (0);
2801 } /* ixv_configure_interrupts */
2802
2803
2804 /************************************************************************
2805 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2806 *
2807 * Done outside of interrupt context since the driver might sleep
2808 ************************************************************************/
2809 static void
2810 ixv_handle_link(void *context)
2811 {
2812 struct adapter *adapter = context;
2813
2814 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2815 &adapter->link_up, FALSE);
2816 ixv_update_link_status(adapter);
2817 } /* ixv_handle_link */
2818
2819 /************************************************************************
2820 * ixv_check_link - Used in the local timer to poll for link changes
2821 ************************************************************************/
2822 static void
2823 ixv_check_link(struct adapter *adapter)
2824 {
2825 adapter->hw.mac.get_link_status = TRUE;
2826
2827 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2828 &adapter->link_up, FALSE);
2829 ixv_update_link_status(adapter);
2830 } /* ixv_check_link */
2831