ixv.c revision 1.69 1 /*$NetBSD: ixv.c,v 1.69 2017/10/04 11:03:20 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   A subvendor/subdevice ID of 0 acts as a wildcard when matched
 *   by ixv_lookup().
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the "String Index" field of ixv_vendor_info_array;
 *   printed at attach time.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Autoconfiguration entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif

/* ifnet / ioctl plumbing */
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);

/* PCI / interrupt resource management */
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);

/* Watchdog and link handling */
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

/* Hardware queue/RSS initialization */
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static void	ixv_check_link(struct adapter *);

/* Interrupt masking, multicast, IVAR routing */
static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN support */
static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Sysctls and statistics */
static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets (softint context) */
static void	ixv_handle_que(void *);
static void	ixv_handle_link(void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
146
/************************************************************************
 * Device Interface Entry Points (NetBSD autoconf)
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus glue; retained for reference, unused on NetBSD. */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
164
/*
 * TUNEABLE PARAMETERS:
 *
 * NOTE: TUNABLE_INT is defined away below, so on NetBSD these are
 * compile-time defaults only; the FreeBSD loader tunables are inert.
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * Callout/softint flags: run MP-safe only when the kernel is built
 * with NET_MPSAFE.  ("SOFTINFT" is a historical typo preserved because
 * other parts of this file use the same macro name.)
 */
#ifdef NET_MPSAFE
#define	IXGBE_MPSAFE		1
#define	IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define	IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define	IXGBE_CALLOUT_FLAGS	0
#define	IXGBE_SOFTINFT_FLAGS	0
#endif

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
220
221 /************************************************************************
222 * ixv_probe - Device identification routine
223 *
224 * Determines if the driver should be loaded on
225 * adapter based on its PCI vendor/device ID.
226 *
227 * return BUS_PROBE_DEFAULT on success, positive on failure
228 ************************************************************************/
229 static int
230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
231 {
232 #ifdef __HAVE_PCI_MSI_MSIX
233 const struct pci_attach_args *pa = aux;
234
235 return (ixv_lookup(pa) != NULL) ? 1 : 0;
236 #else
237 return 0;
238 #endif
239 } /* ixv_probe */
240
241 static ixgbe_vendor_info_t *
242 ixv_lookup(const struct pci_attach_args *pa)
243 {
244 ixgbe_vendor_info_t *ent;
245 pcireg_t subid;
246
247 INIT_DEBUGOUT("ixv_lookup: begin");
248
249 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
250 return NULL;
251
252 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
253
254 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
255 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
256 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
257 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
258 (ent->subvendor_id == 0)) &&
259 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
260 (ent->subdevice_id == 0))) {
261 return ent;
262 }
263 }
264
265 return NULL;
266 }
267
268 /************************************************************************
269 * ixv_attach - Device initialization routine
270 *
271 * Called when the driver is being loaded.
272 * Identifies the type of hardware, allocates all resources
273 * and initializes the hardware.
274 *
275 * return 0 on success, positive on failure
276 ************************************************************************/
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure, all partially acquired resources are released via the
 *   err_late/err_out paths before returning (attach itself is void).
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	pcireg_t	id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Record PCI accessors; prefer the 64-bit DMA tag if usable. */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot be NULL: ixv_probe() already matched this device. */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout (watchdog; armed in ixv_init_locked) */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type: map device ID to VF MAC type. */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox (PF <-> VF communication channel) */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/*
	 * Negotiate mailbox API version with the PF.  A failure here is
	 * non-fatal; we continue with whatever hw->api_version ended up as.
	 */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/*
	 * If no mac address was assigned (the PF may not provide one),
	 * make a random locally-administered unicast one.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear multicast bit */
		addr[0] |= 0x02;	/* set locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN,MAX]; otherwise
	 * fall back to the defaults.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report capability/enabled feature bitmaps (verbose boot only) */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; tear them down before common cleanup. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
536
537 /************************************************************************
538 * ixv_detach - Device removal routine
539 *
540 * Called when the driver is being removed.
541 * Stops the adapter and deallocates all the resources
542 * that were allocated for driver operation.
543 *
544 * return 0 on success, positive on failure
545 ************************************************************************/
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   flags: DETACH_* flags from autoconf; DETACH_SHUTDOWN/DETACH_FORCE
 *   allow detaching even with VLANs still configured.
 *
 *   return 0 on success, EBUSY if VLANs are in use and detach is not
 *   forced.
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter  *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* If ixv_attach() bailed out early there is nothing to undo. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down per-queue softints (TX softint only in multiqueue mode) */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/* Detach all event counters registered by ixv_add_stats_sysctls() */
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-ring counters (txr was advanced above; reset it first) */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
668
669 /************************************************************************
670 * ixv_init_locked - Init entry point
671 *
672 * Used in two ways: It is used by the stack as an init entry
673 * point in network interface structure. It is also used
674 * by the driver as a hw/sw initialization routine to get
675 * to a consistent state.
676 *
677 * return 0 on success, positive on failure
678 ************************************************************************/
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the core lock (asserted below).
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	int             error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the adapter and the watchdog before reprogramming */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: mailbox vector plus every queue vector */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
790
791 /*
792 * MSI-X Interrupt Handlers and Tasklets
793 */
794
795 static inline void
796 ixv_enable_queue(struct adapter *adapter, u32 vector)
797 {
798 struct ixgbe_hw *hw = &adapter->hw;
799 u32 queue = 1 << vector;
800 u32 mask;
801
802 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
803 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
804 } /* ixv_enable_queue */
805
806 static inline void
807 ixv_disable_queue(struct adapter *adapter, u32 vector)
808 {
809 struct ixgbe_hw *hw = &adapter->hw;
810 u64 queue = (u64)(1 << vector);
811 u32 mask;
812
813 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
814 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
815 } /* ixv_disable_queue */
816
817 static inline void
818 ixv_rearm_queues(struct adapter *adapter, u64 queues)
819 {
820 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
821 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
822 } /* ixv_rearm_queues */
823
824
825 /************************************************************************
826 * ixv_msix_que - MSI Queue Interrupt Service routine
827 ************************************************************************/
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler: masks the queue's vector, cleans the
 *   TX ring, and either schedules the deferred softint (RX work) or
 *   re-enables the vector.  Optionally recomputes the interrupt
 *   moderation (AIM) setting from recent traffic.
 *
 *   Always returns 1 (interrupt claimed).
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask this vector until the deferred work re-enables it */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use the larger of the TX/RX mean packet sizes */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* EITR register takes the interval in both halfwords */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
906
907 /************************************************************************
908 * ixv_msix_mbx
909 ************************************************************************/
/************************************************************************
 * ixv_msix_mbx - mailbox (link) MSI-X interrupt handler
 *
 *   Flags a pending link-status change, defers the actual mailbox
 *   processing to the link softint, and re-enables the mailbox vector.
 *
 *   Always returns 1 (interrupt claimed).
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox vector */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
927
928 /************************************************************************
929 * ixv_media_status - Media Ioctl callback
930 *
931 * Called whenever the user queries the status of
932 * the interface using ifconfig.
933 ************************************************************************/
934 static void
935 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
936 {
937 struct adapter *adapter = ifp->if_softc;
938
939 INIT_DEBUGOUT("ixv_media_status: begin");
940 IXGBE_CORE_LOCK(adapter);
941 ixv_update_link_status(adapter);
942
943 ifmr->ifm_status = IFM_AVALID;
944 ifmr->ifm_active = IFM_ETHER;
945
946 if (!adapter->link_active) {
947 ifmr->ifm_active |= IFM_NONE;
948 IXGBE_CORE_UNLOCK(adapter);
949 return;
950 }
951
952 ifmr->ifm_status |= IFM_ACTIVE;
953
954 switch (adapter->link_speed) {
955 case IXGBE_LINK_SPEED_10GB_FULL:
956 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
957 break;
958 case IXGBE_LINK_SPEED_1GB_FULL:
959 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
960 break;
961 case IXGBE_LINK_SPEED_100_FULL:
962 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
963 break;
964 case IXGBE_LINK_SPEED_10_FULL:
965 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
966 break;
967 }
968
969 IXGBE_CORE_UNLOCK(adapter);
970
971 return;
972 } /* ixv_media_status */
973
974 /************************************************************************
975 * ixv_media_change - Media Ioctl callback
976 *
977 * Called when the user changes speed/duplex using
978 * media/mediopt option with ifconfig.
979 ************************************************************************/
980 static int
981 ixv_media_change(struct ifnet *ifp)
982 {
983 struct adapter *adapter = ifp->if_softc;
984 struct ifmedia *ifm = &adapter->media;
985
986 INIT_DEBUGOUT("ixv_media_change: begin");
987
988 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
989 return (EINVAL);
990
991 switch (IFM_SUBTYPE(ifm->ifm_media)) {
992 case IFM_AUTO:
993 break;
994 default:
995 device_printf(adapter->dev, "Only auto media type\n");
996 return (EINVAL);
997 }
998
999 return (0);
1000 } /* ixv_media_change */
1001
1002
1003 /************************************************************************
1004 * ixv_negotiate_api
1005 *
1006 * Negotiate the Mailbox API with the PF;
1007 * start with the most featured API first.
1008 ************************************************************************/
1009 static int
1010 ixv_negotiate_api(struct adapter *adapter)
1011 {
1012 struct ixgbe_hw *hw = &adapter->hw;
1013 int mbx_api[] = { ixgbe_mbox_api_11,
1014 ixgbe_mbox_api_10,
1015 ixgbe_mbox_api_unknown };
1016 int i = 0;
1017
1018 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1019 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1020 return (0);
1021 i++;
1022 }
1023
1024 return (EINVAL);
1025 } /* ixv_negotiate_api */
1026
1027
1028 /************************************************************************
1029 * ixv_set_multi - Multicast Update
1030 *
1031 * Called whenever multicast address list is updated.
1032 ************************************************************************/
1033 static void
1034 ixv_set_multi(struct adapter *adapter)
1035 {
1036 struct ether_multi *enm;
1037 struct ether_multistep step;
1038 struct ethercom *ec = &adapter->osdep.ec;
1039 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1040 u8 *update_ptr;
1041 int mcnt = 0;
1042
1043 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1044
1045 ETHER_FIRST_MULTI(step, ec, enm);
1046 while (enm != NULL) {
1047 bcopy(enm->enm_addrlo,
1048 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1049 IXGBE_ETH_LENGTH_OF_ADDRESS);
1050 mcnt++;
1051 /* XXX This might be required --msaitoh */
1052 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1053 break;
1054 ETHER_NEXT_MULTI(step, enm);
1055 }
1056
1057 update_ptr = mta;
1058
1059 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1060 ixv_mc_array_itr, TRUE);
1061
1062 return;
1063 } /* ixv_set_multi */
1064
1065 /************************************************************************
1066 * ixv_mc_array_itr
1067 *
1068 * An iterator function needed by the multicast shared code.
1069 * It feeds the shared code routine the addresses in the
1070 * array of ixv_set_multi() one by one.
1071 ************************************************************************/
1072 static u8 *
1073 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1074 {
1075 u8 *addr = *update_ptr;
1076 u8 *newptr;
1077 *vmdq = 0;
1078
1079 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1080 *update_ptr = newptr;
1081
1082 return addr;
1083 } /* ixv_mc_array_itr */
1084
1085 /************************************************************************
1086 * ixv_local_timer - Timer routine
1087 *
1088 * Checks for link status, updates statistics,
1089 * and runs the watchdog check.
1090 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	/* Callout entry point: acquire the core lock and do the real work. */
	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1100
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;	/* bitmask of queues that still have TX work */
	int hung = 0;	/* number of queues currently declared hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Refresh the cached link state. */
	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *   - mark hung queues so we don't schedule on them
	 *   - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		/*
		 * NOTE(review): this reads que->busy, while the hung
		 * marker below is stored into que->txr->busy -- confirm
		 * which field txeof increments; the two may be out of
		 * sync.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* Every queue is hung: log it, mark the interface down, re-init. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1167
1168 /************************************************************************
1169 * ixv_update_link_status - Update OS on link state
1170 *
1171 * Note: Only updates the OS on the cached link state.
1172 * The real check of the hardware only happens with
1173 * a link interrupt.
1174 ************************************************************************/
1175 static void
1176 ixv_update_link_status(struct adapter *adapter)
1177 {
1178 struct ifnet *ifp = adapter->ifp;
1179 device_t dev = adapter->dev;
1180
1181 if (adapter->link_up) {
1182 if (adapter->link_active == FALSE) {
1183 if (bootverbose) {
1184 const char *bpsmsg;
1185
1186 switch (adapter->link_speed) {
1187 case IXGBE_LINK_SPEED_10GB_FULL:
1188 bpsmsg = "10 Gbps";
1189 break;
1190 case IXGBE_LINK_SPEED_5GB_FULL:
1191 bpsmsg = "5 Gbps";
1192 break;
1193 case IXGBE_LINK_SPEED_2_5GB_FULL:
1194 bpsmsg = "2.5 Gbps";
1195 break;
1196 case IXGBE_LINK_SPEED_1GB_FULL:
1197 bpsmsg = "1 Gbps";
1198 break;
1199 case IXGBE_LINK_SPEED_100_FULL:
1200 bpsmsg = "100 Mbps";
1201 break;
1202 case IXGBE_LINK_SPEED_10_FULL:
1203 bpsmsg = "10 Mbps";
1204 break;
1205 default:
1206 bpsmsg = "unknown speed";
1207 break;
1208 }
1209 device_printf(dev, "Link is up %s %s \n",
1210 bpsmsg, "Full Duplex");
1211 }
1212 adapter->link_active = TRUE;
1213 if_link_state_change(ifp, LINK_STATE_UP);
1214 }
1215 } else { /* Link down */
1216 if (adapter->link_active == TRUE) {
1217 if (bootverbose)
1218 device_printf(dev, "Link is Down\n");
1219 if_link_state_change(ifp, LINK_STATE_DOWN);
1220 adapter->link_active = FALSE;
1221 }
1222 }
1223
1224 return;
1225 } /* ixv_update_link_status */
1226
1227
1228 /************************************************************************
1229 * ixv_stop - Stop the hardware
1230 *
1231 * Disables all traffic on the adapter by issuing a
1232 * global reset on the MAC and deallocates TX/RX buffers.
1233 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	/*
	 * if_stop callback: the 'disable' argument is ignored; the
	 * hardware is always fully stopped under the core lock.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1243
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask all interrupt sources before touching the hardware. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped so the stop_adapter call below is not
	 * short-circuited.  NOTE(review): assumption inferred from the
	 * FALSE-before-stop ordering; confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the one-second watchdog/stats callout. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1271
1272
1273 /************************************************************************
1274 * ixv_allocate_pci_resources
1275 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* BAR0 holds the register space; only memory BARs are valid. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Device registers must not be mapped prefetchable. */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped on detach. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1316
1317 /************************************************************************
1318 * ixv_free_pci_resources
1319 ************************************************************************/
1320 static void
1321 ixv_free_pci_resources(struct adapter * adapter)
1322 {
1323 struct ix_queue *que = adapter->queues;
1324 int rid;
1325
1326 /*
1327 * Release all msix queue resources:
1328 */
1329 for (int i = 0; i < adapter->num_queues; i++, que++) {
1330 if (que->res != NULL)
1331 pci_intr_disestablish(adapter->osdep.pc,
1332 adapter->osdep.ihs[i]);
1333 }
1334
1335
1336 /* Clean the Mailbox interrupt last */
1337 rid = adapter->vector;
1338
1339 if (adapter->osdep.ihs[rid] != NULL) {
1340 pci_intr_disestablish(adapter->osdep.pc,
1341 adapter->osdep.ihs[rid]);
1342 adapter->osdep.ihs[rid] = NULL;
1343 }
1344
1345 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1346 adapter->osdep.nintrs);
1347
1348 if (adapter->osdep.mem_size != 0) {
1349 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1350 adapter->osdep.mem_bus_space_handle,
1351 adapter->osdep.mem_size);
1352 }
1353
1354 return;
1355 } /* ixv_free_pci_resources */
1356
1357 /************************************************************************
1358 * ixv_setup_interface
1359 *
1360 * Setup networking device structure and register an interface.
1361 ************************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up the callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Allow the stack to call if_start without the kernel lock. */
	ifp->if_extflags = IFEF_START_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Order matters: initialize, attach Ethernet, then register. */
	if_initialize(ifp);
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_TSOv4
	                     |  IFCAP_TSOv6;
	/* None enabled by default; the administrator opts in. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	                    |  ETHERCAP_VLAN_HWCSUM
	                    |  ETHERCAP_JUMBO_MTU
	                    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
} /* ixv_setup_interface */
1443
1444
1445 /************************************************************************
1446 * ixv_initialize_transmit_units - Enable transmit unit.
1447 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	/* Program each TX ring: thresholds, base/length, then enable. */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Remember the tail register offset for the transmit path. */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Clear the descriptor write-back relaxed-ordering bit. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1489
1490
1491 /************************************************************************
1492 * ixv_initialize_rss_mapping
1493 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, four 8-bit queue ids
	 * packed per VFRETA register.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush this register. */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash types into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1585
1586
1587 /************************************************************************
1588 * ixv_initialize_receive_units - Setup receive registers and features.
1589 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the receive buffer size based on the configured MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* Bit 29 is set only for multiqueue -- presumably the RSS queue
	 * count field in PSRTYPE; confirm against the datasheet. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) until the disable takes effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the enable bit to stick. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* PCSD off implies IP payload checksum is wanted instead. */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1718
1719 /************************************************************************
1720 * ixv_setup_vlan_support
1721 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Turn on hardware VLAN tag stripping for every RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; give up
			 * after a handful of failed attempts per VID.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1783
#if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
/*
 * NOTE: the two handlers below are compiled out; the shadow VFTA is
 * currently populated wholesale by the hack in ixv_setup_vlan_support().
 */
/************************************************************************
 * ixv_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each u32 of shadow_vfta covers 32 consecutive VLAN ids. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */

/************************************************************************
 * ixv_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the corresponding bit in the soft VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
#endif
1841
1842 /************************************************************************
1843 * ixv_enable_intr
1844 ************************************************************************/
1845 static void
1846 ixv_enable_intr(struct adapter *adapter)
1847 {
1848 struct ixgbe_hw *hw = &adapter->hw;
1849 struct ix_queue *que = adapter->queues;
1850 u32 mask;
1851 int i;
1852
1853 /* For VTEIAC */
1854 mask = (1 << adapter->vector);
1855 for (i = 0; i < adapter->num_queues; i++, que++)
1856 mask |= (1 << que->msix);
1857 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1858
1859 /* For VTEIMS */
1860 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
1861 que = adapter->queues;
1862 for (i = 0; i < adapter->num_queues; i++, que++)
1863 ixv_enable_queue(adapter, que->msix);
1864
1865 IXGBE_WRITE_FLUSH(hw);
1866
1867 return;
1868 } /* ixv_enable_intr */
1869
1870 /************************************************************************
1871 * ixv_disable_intr
1872 ************************************************************************/
1873 static void
1874 ixv_disable_intr(struct adapter *adapter)
1875 {
1876 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1877 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1878 IXGBE_WRITE_FLUSH(&adapter->hw);
1879
1880 return;
1881 } /* ixv_disable_intr */
1882
1883 /************************************************************************
1884 * ixv_set_ivar
1885 *
1886 * Setup the correct IVAR register for a particular MSI-X interrupt
1887 * - entry is the register array entry
1888 * - vector is the MSI-X vector for this queue
1889 * - type is RX/TX/MISC
1890 ************************************************************************/
1891 static void
1892 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1893 {
1894 struct ixgbe_hw *hw = &adapter->hw;
1895 u32 ivar, index;
1896
1897 vector |= IXGBE_IVAR_ALLOC_VAL;
1898
1899 if (type == -1) { /* MISC IVAR */
1900 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1901 ivar &= ~0xFF;
1902 ivar |= vector;
1903 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1904 } else { /* RX/TX IVARS */
1905 index = (16 * (entry & 1)) + (8 * type);
1906 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1907 ivar &= ~(0xFF << index);
1908 ivar |= (vector << index);
1909 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1910 }
1911 } /* ixv_set_ivar */
1912
1913 /************************************************************************
1914 * ixv_configure_ivars
1915 ************************************************************************/
1916 static void
1917 ixv_configure_ivars(struct adapter *adapter)
1918 {
1919 struct ix_queue *que = adapter->queues;
1920
1921 for (int i = 0; i < adapter->num_queues; i++, que++) {
1922 /* First the RX queue entry */
1923 ixv_set_ivar(adapter, i, que->msix, 0);
1924 /* ... and the TX */
1925 ixv_set_ivar(adapter, i, que->msix, 1);
1926 /* Set an initial value in EITR */
1927 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1928 IXGBE_EITR_DEFAULT);
1929 }
1930
1931 /* For the mailbox interrupt */
1932 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1933 } /* ixv_configure_ivars */
1934
1935
1936 /************************************************************************
1937 * ixv_save_stats
1938 *
1939 * The VF stats registers never have a truly virgin
1940 * starting point, so this routine tries to make an
1941 * artificial one, marking ground zero on attach as
1942 * it were.
1943 ************************************************************************/
1944 static void
1945 ixv_save_stats(struct adapter *adapter)
1946 {
1947 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1948
1949 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1950 stats->saved_reset_vfgprc +=
1951 stats->vfgprc.ev_count - stats->base_vfgprc;
1952 stats->saved_reset_vfgptc +=
1953 stats->vfgptc.ev_count - stats->base_vfgptc;
1954 stats->saved_reset_vfgorc +=
1955 stats->vfgorc.ev_count - stats->base_vfgorc;
1956 stats->saved_reset_vfgotc +=
1957 stats->vfgotc.ev_count - stats->base_vfgotc;
1958 stats->saved_reset_vfmprc +=
1959 stats->vfmprc.ev_count - stats->base_vfmprc;
1960 }
1961 } /* ixv_save_stats */
1962
1963 /************************************************************************
1964 * ixv_init_stats
1965 ************************************************************************/
1966 static void
1967 ixv_init_stats(struct adapter *adapter)
1968 {
1969 struct ixgbe_hw *hw = &adapter->hw;
1970
1971 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1972 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1973 adapter->stats.vf.last_vfgorc |=
1974 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1975
1976 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1977 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1978 adapter->stats.vf.last_vfgotc |=
1979 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1980
1981 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1982
1983 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1984 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1985 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1986 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1987 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1988 } /* ixv_init_stats */
1989
/*
 * Fold a 32-bit, non-clearable VF counter register into a 64-bit
 * evcnt.  Detects wraparound (register value went backwards) and
 * carries into the upper 32 bits.  Expects a local "hw" in scope.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the bare { ... } form left a stray empty statement after
 * the invocation's semicolon and mis-parses inside unbraced if/else.
 */
#define UPDATE_STAT_32(reg, last, count)			\
do {								\
	u32 current = IXGBE_READ_REG(hw, (reg));		\
	if (current < (last))					\
		count.ev_count += 0x100000000LL;		\
	(last) = current;					\
	count.ev_count &= 0xFFFFFFFF00000000LL;			\
	count.ev_count |= current;				\
} while (/*CONSTCOND*/0)
1999
/*
 * Fold a 36-bit VF counter (split across LSB/MSB registers) into a
 * 64-bit evcnt, carrying on 36-bit wraparound.  Expects a local "hw"
 * in scope.  do { } while (0) for single-statement macro hygiene,
 * matching UPDATE_STAT_32.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)			\
do {								\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));		\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));		\
	u64 current = ((cur_msb << 32) | cur_lsb);		\
	if (current < (last))					\
		count.ev_count += 0x1000000000LL;		\
	(last) = current;					\
	count.ev_count &= 0xFFFFFFF000000000LL;			\
	count.ev_count |= current;				\
} while (/*CONSTCOND*/0)
2011
2012 /************************************************************************
2013 * ixv_update_stats - Update the board statistics counters.
2014 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	/* "hw" is referenced by the UPDATE_STAT_* macros below. */
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/*
	 * VF counter registers cannot be cleared by the VF, so each
	 * macro folds the raw register value into the matching 64-bit
	 * evcnt, compensating for 32-bit (UPDATE_STAT_32) or 36-bit
	 * (UPDATE_STAT_36) hardware wraparound.
	 */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2036
2037 const struct sysctlnode *
2038 ixv_sysctl_instance(struct adapter *adapter)
2039 {
2040 const char *dvname;
2041 struct sysctllog **log;
2042 int rc;
2043 const struct sysctlnode *rnode;
2044
2045 log = &adapter->sysctllog;
2046 dvname = device_xname(adapter->dev);
2047
2048 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2049 0, CTLTYPE_NODE, dvname,
2050 SYSCTL_DESCR("ixv information and settings"),
2051 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2052 goto err;
2053
2054 return rnode;
2055 err:
2056 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2057 return NULL;
2058 }
2059
2060 static void
2061 ixv_add_device_sysctls(struct adapter *adapter)
2062 {
2063 struct sysctllog **log;
2064 const struct sysctlnode *rnode, *cnode;
2065 device_t dev;
2066
2067 dev = adapter->dev;
2068 log = &adapter->sysctllog;
2069
2070 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2071 aprint_error_dev(dev, "could not create sysctl root\n");
2072 return;
2073 }
2074
2075 if (sysctl_createv(log, 0, &rnode, &cnode,
2076 CTLFLAG_READWRITE, CTLTYPE_INT,
2077 "debug", SYSCTL_DESCR("Debug Info"),
2078 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2079 aprint_error_dev(dev, "could not create sysctl\n");
2080
2081 if (sysctl_createv(log, 0, &rnode, &cnode,
2082 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2083 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2084 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2085 aprint_error_dev(dev, "could not create sysctl\n");
2086 }
2087
2088 /************************************************************************
2089 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2090 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue statistics: one sysctl node and evcnt group each. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* rnode is re-pointed at the new per-queue node here. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2273
2274 /************************************************************************
2275 * ixv_set_sysctl_value
2276 ************************************************************************/
2277 static void
2278 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2279 const char *description, int *limit, int value)
2280 {
2281 device_t dev = adapter->dev;
2282 struct sysctllog **log;
2283 const struct sysctlnode *rnode, *cnode;
2284
2285 log = &adapter->sysctllog;
2286 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2287 aprint_error_dev(dev, "could not create sysctl root\n");
2288 return;
2289 }
2290 if (sysctl_createv(log, 0, &rnode, &cnode,
2291 CTLFLAG_READWRITE, CTLTYPE_INT,
2292 name, SYSCTL_DESCR(description),
2293 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2294 aprint_error_dev(dev, "could not create sysctl\n");
2295 *limit = value;
2296 } /* ixv_set_sysctl_value */
2297
2298 /************************************************************************
2299 * ixv_print_debug_info
2300 *
2301 * Called only when em_display_debug_stats is enabled.
2302 * Provides a way to take a look at important statistics
2303 * maintained by the driver and hardware.
2304 ************************************************************************/
2305 static void
2306 ixv_print_debug_info(struct adapter *adapter)
2307 {
2308 device_t dev = adapter->dev;
2309 struct ixgbe_hw *hw = &adapter->hw;
2310 struct ix_queue *que = adapter->queues;
2311 struct rx_ring *rxr;
2312 struct tx_ring *txr;
2313 #ifdef LRO
2314 struct lro_ctrl *lro;
2315 #endif /* LRO */
2316
2317 device_printf(dev, "Error Byte Count = %u \n",
2318 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2319
2320 for (int i = 0; i < adapter->num_queues; i++, que++) {
2321 txr = que->txr;
2322 rxr = que->rxr;
2323 #ifdef LRO
2324 lro = &rxr->lro;
2325 #endif /* LRO */
2326 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2327 que->msix, (long)que->irqs.ev_count);
2328 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2329 rxr->me, (long long)rxr->rx_packets.ev_count);
2330 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2331 rxr->me, (long)rxr->rx_bytes.ev_count);
2332 #ifdef LRO
2333 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2334 rxr->me, (long long)lro->lro_queued);
2335 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2336 rxr->me, (long long)lro->lro_flushed);
2337 #endif /* LRO */
2338 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2339 txr->me, (long)txr->total_packets.ev_count);
2340 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2341 txr->me, (long)txr->no_desc_avail.ev_count);
2342 }
2343
2344 device_printf(dev, "MBX IRQ Handled: %lu\n",
2345 (long)adapter->link_irq.ev_count);
2346 } /* ixv_print_debug_info */
2347
2348 /************************************************************************
2349 * ixv_sysctl_debug
2350 ************************************************************************/
2351 static int
2352 ixv_sysctl_debug(SYSCTLFN_ARGS)
2353 {
2354 struct sysctlnode node;
2355 struct adapter *adapter;
2356 int error, result;
2357
2358 node = *rnode;
2359 node.sysctl_data = &result;
2360 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2361
2362 if (error || newp == NULL)
2363 return error;
2364
2365 if (result == 1) {
2366 adapter = (struct adapter *)node.sysctl_data;
2367 ixv_print_debug_info(adapter);
2368 }
2369
2370 return 0;
2371 } /* ixv_sysctl_debug */
2372
2373 /************************************************************************
2374 * ixv_init_device_features
2375 ************************************************************************/
2376 static void
2377 ixv_init_device_features(struct adapter *adapter)
2378 {
2379 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2380 | IXGBE_FEATURE_VF
2381 | IXGBE_FEATURE_RSS
2382 | IXGBE_FEATURE_LEGACY_TX;
2383
2384 /* A tad short on feature flags for VFs, atm. */
2385 switch (adapter->hw.mac.type) {
2386 case ixgbe_mac_82599_vf:
2387 break;
2388 case ixgbe_mac_X540_vf:
2389 break;
2390 case ixgbe_mac_X550_vf:
2391 case ixgbe_mac_X550EM_x_vf:
2392 case ixgbe_mac_X550EM_a_vf:
2393 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2394 break;
2395 default:
2396 break;
2397 }
2398
2399 /* Enabled by default... */
2400 /* Is a virtual function (VF) */
2401 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2402 adapter->feat_en |= IXGBE_FEATURE_VF;
2403 /* Netmap */
2404 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2405 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2406 /* Receive-Side Scaling (RSS) */
2407 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2408 adapter->feat_en |= IXGBE_FEATURE_RSS;
2409 /* Needs advanced context descriptor regardless of offloads req'd */
2410 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2411 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2412
2413 /* Enabled via sysctl... */
2414 /* Legacy (single queue) transmit */
2415 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2416 ixv_enable_legacy_tx)
2417 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2418 } /* ixv_init_device_features */
2419
2420 /************************************************************************
2421 * ixv_shutdown - Shutdown entry point
2422 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Stop the adapter under the core lock at system shutdown time. */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2435
2436 static int
2437 ixv_ifflags_cb(struct ethercom *ec)
2438 {
2439 struct ifnet *ifp = &ec->ec_if;
2440 struct adapter *adapter = ifp->if_softc;
2441 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2442
2443 IXGBE_CORE_LOCK(adapter);
2444
2445 if (change != 0)
2446 adapter->if_flags = ifp->if_flags;
2447
2448 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2449 rc = ENETRESET;
2450
2451 /* Set up VLAN support and filter */
2452 ixv_setup_vlan_support(adapter);
2453
2454 IXGBE_CORE_UNLOCK(adapter);
2455
2456 return rc;
2457 }
2458
2459
2460 /************************************************************************
2461 * ixv_ioctl - Ioctl entry point
2462 *
2463 * Called when the user wants to configure the interface.
2464 *
2465 * return 0 on success, positive on failure
2466 ************************************************************************/
2467 static int
2468 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2469 {
2470 struct adapter *adapter = ifp->if_softc;
2471 struct ifcapreq *ifcr = data;
2472 struct ifreq *ifr = data;
2473 int error = 0;
2474 int l4csum_en;
2475 const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
2476 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
2477
2478 switch (command) {
2479 case SIOCSIFFLAGS:
2480 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2481 break;
2482 case SIOCADDMULTI:
2483 case SIOCDELMULTI:
2484 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2485 break;
2486 case SIOCSIFMEDIA:
2487 case SIOCGIFMEDIA:
2488 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2489 break;
2490 case SIOCSIFCAP:
2491 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2492 break;
2493 case SIOCSIFMTU:
2494 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2495 break;
2496 default:
2497 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
2498 break;
2499 }
2500
2501 switch (command) {
2502 case SIOCSIFMEDIA:
2503 case SIOCGIFMEDIA:
2504 return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2505 case SIOCSIFCAP:
2506 /* Layer-4 Rx checksum offload has to be turned on and
2507 * off as a unit.
2508 */
2509 l4csum_en = ifcr->ifcr_capenable & l4csum;
2510 if (l4csum_en != l4csum && l4csum_en != 0)
2511 return EINVAL;
2512 /*FALLTHROUGH*/
2513 case SIOCADDMULTI:
2514 case SIOCDELMULTI:
2515 case SIOCSIFFLAGS:
2516 case SIOCSIFMTU:
2517 default:
2518 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2519 return error;
2520 if ((ifp->if_flags & IFF_RUNNING) == 0)
2521 ;
2522 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
2523 IXGBE_CORE_LOCK(adapter);
2524 ixv_init_locked(adapter);
2525 IXGBE_CORE_UNLOCK(adapter);
2526 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
2527 /*
2528 * Multicast list has changed; set the hardware filter
2529 * accordingly.
2530 */
2531 IXGBE_CORE_LOCK(adapter);
2532 ixv_disable_intr(adapter);
2533 ixv_set_multi(adapter);
2534 ixv_enable_intr(adapter);
2535 IXGBE_CORE_UNLOCK(adapter);
2536 }
2537 return 0;
2538 }
2539 } /* ixv_ioctl */
2540
2541 /************************************************************************
2542 * ixv_init
2543 ************************************************************************/
2544 static int
2545 ixv_init(struct ifnet *ifp)
2546 {
2547 struct adapter *adapter = ifp->if_softc;
2548
2549 IXGBE_CORE_LOCK(adapter);
2550 ixv_init_locked(adapter);
2551 IXGBE_CORE_UNLOCK(adapter);
2552
2553 return 0;
2554 } /* ixv_init */
2555
2556
2557 /************************************************************************
2558 * ixv_handle_que
2559 ************************************************************************/
2560 static void
2561 ixv_handle_que(void *context)
2562 {
2563 struct ix_queue *que = context;
2564 struct adapter *adapter = que->adapter;
2565 struct tx_ring *txr = que->txr;
2566 struct ifnet *ifp = adapter->ifp;
2567 bool more;
2568
2569 adapter->handleq.ev_count++;
2570
2571 if (ifp->if_flags & IFF_RUNNING) {
2572 more = ixgbe_rxeof(que);
2573 IXGBE_TX_LOCK(txr);
2574 ixgbe_txeof(txr);
2575 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2576 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
2577 ixgbe_mq_start_locked(ifp, txr);
2578 /* Only for queue 0 */
2579 /* NetBSD still needs this for CBQ */
2580 if ((&adapter->queues[0] == que)
2581 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
2582 ixgbe_legacy_start_locked(ifp, txr);
2583 IXGBE_TX_UNLOCK(txr);
2584 if (more) {
2585 adapter->req.ev_count++;
2586 softint_schedule(que->que_si);
2587 return;
2588 }
2589 }
2590
2591 /* Re-enable this interrupt */
2592 ixv_enable_queue(adapter, que->msix);
2593
2594 return;
2595 } /* ixv_handle_que */
2596
2597 /************************************************************************
2598 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2599 ************************************************************************/
2600 static int
2601 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2602 {
2603 device_t dev = adapter->dev;
2604 struct ix_queue *que = adapter->queues;
2605 struct tx_ring *txr = adapter->tx_rings;
2606 int error, msix_ctrl, rid, vector = 0;
2607 pci_chipset_tag_t pc;
2608 pcitag_t tag;
2609 char intrbuf[PCI_INTRSTR_LEN];
2610 char intr_xname[32];
2611 const char *intrstr = NULL;
2612 kcpuset_t *affinity;
2613 int cpu_id = 0;
2614
2615 pc = adapter->osdep.pc;
2616 tag = adapter->osdep.tag;
2617
2618 adapter->osdep.nintrs = adapter->num_queues + 1;
2619 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2620 adapter->osdep.nintrs) != 0) {
2621 aprint_error_dev(dev,
2622 "failed to allocate MSI-X interrupt\n");
2623 return (ENXIO);
2624 }
2625
2626 kcpuset_create(&affinity, false);
2627 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2628 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2629 device_xname(dev), i);
2630 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2631 sizeof(intrbuf));
2632 #ifdef IXGBE_MPSAFE
2633 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2634 true);
2635 #endif
2636 /* Set the handler function */
2637 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2638 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2639 intr_xname);
2640 if (que->res == NULL) {
2641 pci_intr_release(pc, adapter->osdep.intrs,
2642 adapter->osdep.nintrs);
2643 aprint_error_dev(dev,
2644 "Failed to register QUE handler\n");
2645 kcpuset_destroy(affinity);
2646 return (ENXIO);
2647 }
2648 que->msix = vector;
2649 adapter->active_queues |= (u64)(1 << que->msix);
2650
2651 cpu_id = i;
2652 /* Round-robin affinity */
2653 kcpuset_zero(affinity);
2654 kcpuset_set(affinity, cpu_id % ncpu);
2655 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2656 NULL);
2657 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2658 intrstr);
2659 if (error == 0)
2660 aprint_normal(", bound queue %d to cpu %d\n",
2661 i, cpu_id % ncpu);
2662 else
2663 aprint_normal("\n");
2664
2665 #ifndef IXGBE_LEGACY_TX
2666 txr->txr_si
2667 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2668 ixgbe_deferred_mq_start, txr);
2669 #endif
2670 que->que_si
2671 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2672 ixv_handle_que, que);
2673 if (que->que_si == NULL) {
2674 aprint_error_dev(dev,
2675 "could not establish software interrupt\n");
2676 }
2677 }
2678
2679 /* and Mailbox */
2680 cpu_id++;
2681 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2682 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2683 sizeof(intrbuf));
2684 #ifdef IXGBE_MPSAFE
2685 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2686 true);
2687 #endif
2688 /* Set the mbx handler function */
2689 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2690 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2691 intr_xname);
2692 if (adapter->osdep.ihs[vector] == NULL) {
2693 adapter->res = NULL;
2694 aprint_error_dev(dev, "Failed to register LINK handler\n");
2695 kcpuset_destroy(affinity);
2696 return (ENXIO);
2697 }
2698 /* Round-robin affinity */
2699 kcpuset_zero(affinity);
2700 kcpuset_set(affinity, cpu_id % ncpu);
2701 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2702
2703 aprint_normal_dev(dev,
2704 "for link, interrupting at %s", intrstr);
2705 if (error == 0)
2706 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2707 else
2708 aprint_normal("\n");
2709
2710 adapter->vector = vector;
2711 /* Tasklets for Mailbox */
2712 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2713 ixv_handle_link, adapter);
2714 /*
2715 * Due to a broken design QEMU will fail to properly
2716 * enable the guest for MSI-X unless the vectors in
2717 * the table are all set up, so we must rewrite the
2718 * ENABLE in the MSI-X control register again at this
2719 * point to cause it to successfully initialize us.
2720 */
2721 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2722 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
2723 rid += PCI_MSIX_CTL;
2724 msix_ctrl = pci_conf_read(pc, tag, rid);
2725 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
2726 pci_conf_write(pc, tag, rid, msix_ctrl);
2727 }
2728
2729 kcpuset_destroy(affinity);
2730 return (0);
2731 } /* ixv_allocate_msix */
2732
2733 /************************************************************************
2734 * ixv_configure_interrupts - Setup MSI-X resources
2735 *
2736 * Note: The VF device MUST use MSI-X, there is no fallback.
2737 ************************************************************************/
2738 static int
2739 ixv_configure_interrupts(struct adapter *adapter)
2740 {
2741 device_t dev = adapter->dev;
2742 int want, queues, msgs;
2743
2744 /* Must have at least 2 MSI-X vectors */
2745 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2746 if (msgs < 2) {
2747 aprint_error_dev(dev, "MSIX config error\n");
2748 return (ENXIO);
2749 }
2750 msgs = MIN(msgs, IXG_MAX_NINTR);
2751
2752 /* Figure out a reasonable auto config value */
2753 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2754
2755 if (ixv_num_queues != 0)
2756 queues = ixv_num_queues;
2757 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2758 queues = IXGBE_VF_MAX_TX_QUEUES;
2759
2760 /*
2761 * Want vectors for the queues,
2762 * plus an additional for mailbox.
2763 */
2764 want = queues + 1;
2765 if (msgs >= want)
2766 msgs = want;
2767 else {
2768 aprint_error_dev(dev,
2769 "MSI-X Configuration Problem, "
2770 "%d vectors but %d queues wanted!\n",
2771 msgs, want);
2772 return -1;
2773 }
2774
2775 adapter->msix_mem = (void *)1; /* XXX */
2776 aprint_normal_dev(dev,
2777 "Using MSI-X interrupts with %d vectors\n", msgs);
2778 adapter->num_queues = queues;
2779
2780 return (0);
2781 } /* ixv_configure_interrupts */
2782
2783
2784 /************************************************************************
2785 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2786 *
2787 * Done outside of interrupt context since the driver might sleep
2788 ************************************************************************/
2789 static void
2790 ixv_handle_link(void *context)
2791 {
2792 struct adapter *adapter = context;
2793
2794 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2795 &adapter->link_up, FALSE);
2796 ixv_update_link_status(adapter);
2797 } /* ixv_handle_link */
2798
2799 /************************************************************************
2800 * ixv_check_link - Used in the local timer to poll for link changes
2801 ************************************************************************/
2802 static void
2803 ixv_check_link(struct adapter *adapter)
2804 {
2805 adapter->hw.mac.get_link_status = TRUE;
2806
2807 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2808 &adapter->link_up, FALSE);
2809 ixv_update_link_status(adapter);
2810 } /* ixv_check_link */
2811