/*$NetBSD: ixv.c,v 1.60 2017/09/11 10:11:05 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
47 /************************************************************************
48 * Driver version
49 ************************************************************************/
50 char ixv_driver_version[] = "1.5.13-k";
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* All supported devices are Intel VF (virtual function) parts;
	 * subvendor/subdevice of 0 acts as a wildcard in ixv_lookup(). */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 * Indexed by the last field ("String Index") of the entries above.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static void ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121
122 static void ixv_setup_vlan_support(struct adapter *);
123 #if 0
124 static void ixv_register_vlan(void *, struct ifnet *, u16);
125 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
126 #endif
127
128 static void ixv_add_device_sysctls(struct adapter *);
129 static void ixv_save_stats(struct adapter *);
130 static void ixv_init_stats(struct adapter *);
131 static void ixv_update_stats(struct adapter *);
132 static void ixv_add_stats_sysctls(struct adapter *);
133 static void ixv_set_sysctl_value(struct adapter *, const char *,
134 const char *, int *, int);
135
136 /* The MSI-X Interrupt handlers */
137 static int ixv_msix_que(void *);
138 static int ixv_msix_mbx(void *);
139
140 /* Deferred interrupt tasklets */
141 static void ixv_handle_que(void *);
142 static void ixv_handle_link(void *);
143
144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
146
147 /************************************************************************
148 * FreeBSD Device Interface Entry Points
149 ************************************************************************/
/* NetBSD autoconf glue: match/attach/detach entry points for the ixv
 * device; softc is the full struct adapter. */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0	/* FreeBSD newbus registration, kept for reference only */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
164
165 /*
166 * TUNEABLE PARAMETERS:
167 */
168
/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * TUNABLE_INT is a FreeBSD-ism; on NetBSD it is stubbed to nothing and
 * the defaults below are the effective values (runtime knobs are
 * exposed via sysctl in ixv_add_device_sysctls()/ixv_set_sysctl_value()).
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/*
 * With NET_MPSAFE, callouts and softints run without the kernel lock.
 * NOTE(review): "SOFTINFT" is misspelled but presumably matched by its
 * users elsewhere in this file -- renaming would need a tree-wide sweep.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#endif

#if 0	/* FreeBSD-style indirect start hooks, unused on NetBSD */
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
227
228 /************************************************************************
229 * ixv_probe - Device identification routine
230 *
231 * Determines if the driver should be loaded on
232 * adapter based on its PCI vendor/device ID.
233 *
234 * return BUS_PROBE_DEFAULT on success, positive on failure
235 ************************************************************************/
236 static int
237 ixv_probe(device_t dev, cfdata_t cf, void *aux)
238 {
239 #ifdef __HAVE_PCI_MSI_MSIX
240 const struct pci_attach_args *pa = aux;
241
242 return (ixv_lookup(pa) != NULL) ? 1 : 0;
243 #else
244 return 0;
245 #endif
246 } /* ixv_probe */
247
248 static ixgbe_vendor_info_t *
249 ixv_lookup(const struct pci_attach_args *pa)
250 {
251 ixgbe_vendor_info_t *ent;
252 pcireg_t subid;
253
254 INIT_DEBUGOUT("ixv_lookup: begin");
255
256 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
257 return NULL;
258
259 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
260
261 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
262 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
263 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
264 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
265 (ent->subvendor_id == 0)) &&
266 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
267 (ent->subdevice_id == 0))) {
268 return ent;
269 }
270 }
271
272 return NULL;
273 }
274
275 /************************************************************************
276 * ixv_attach - Device initialization routine
277 *
278 * Called when the driver is being loaded.
279 * Identifies the type of hardware, allocates all resources
280 * and initializes the hardware.
281 *
282 * return 0 on success, positive on failure
283 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Prefer the 64-bit DMA tag when the platform offers one. */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* Cannot fail: ixv_probe() already matched this same table. */
	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}

	/* Set feature flags based on mac.type (must follow it). */
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version; failure is non-fatal (stays 1.0). */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/*
	 * If no mac address was assigned by the PF, make a random
	 * locally-administered (bit 1 set), unicast (bit 0 clear) one.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte sizes must be
	 * DBA_ALIGN-multiples and counts within [MIN,MAX]; otherwise fall
	 * back to the defaults rather than fail the attach.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Must follow ixv_setup_interface(); queues exist by now. */
	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Gate for ixv_detach(): only tear down what fully attached. */
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
529
530 /************************************************************************
531 * ixv_detach - Device removal routine
532 *
533 * Called when the driver is being removed.
534 * Stops the adapter and deallocates all the resources
535 * that were allocated for driver operation.
536 *
537 * return 0 on success, positive on failure
538 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Attach never completed; nothing to tear down. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/*
	 * Make sure VLANs are not using driver; refuse a voluntary
	 * detach while VLANs are configured, but force it on
	 * shutdown/forced detach.
	 */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down per-queue softints (txr_si only in multiqueue mode). */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for any in-flight watchdog callout to finish. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered in ixv_add_stats_sysctls(). */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* txr was advanced by the softint loop above; rewind it. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
653
654 /************************************************************************
655 * ixv_init_locked - Init entry point
656 *
657 * Used in two ways: It is used by the stack as an init entry
658 * point in network interface structure. It is also used
659 * by the driver as a hw/sw initialization routine to get
660 * to a consistent state.
661 *
662 * return 0 on success, positive on failure
663 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		/* Non-fatal: continue with the API version left by reset. */
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
768
769 /*
770 * MSI-X Interrupt Handlers and Tasklets
771 */
772
773 static inline void
774 ixv_enable_queue(struct adapter *adapter, u32 vector)
775 {
776 struct ixgbe_hw *hw = &adapter->hw;
777 u32 queue = 1 << vector;
778 u32 mask;
779
780 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
781 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
782 } /* ixv_enable_queue */
783
784 static inline void
785 ixv_disable_queue(struct adapter *adapter, u32 vector)
786 {
787 struct ixgbe_hw *hw = &adapter->hw;
788 u64 queue = (u64)(1 << vector);
789 u32 mask;
790
791 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
792 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
793 } /* ixv_disable_queue */
794
795 static inline void
796 ixv_rearm_queues(struct adapter *adapter, u64 queues)
797 {
798 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
799 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
800 } /* ixv_rearm_queues */
801
802
803 /************************************************************************
804 * ixv_msix_que - MSI Queue Interrupt Service routine
805 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until the deferred work is done. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	 * Make certain that if the stack
	 * has anything queued the task gets
	 * scheduled to handle it.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
		if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
			ixgbe_mq_start_locked(ifp, txr);
	/* Only for queue 0 */
	/* NetBSD still needs this for CBQ */
	if ((&adapter->queues[0] == que)
	    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
		ixgbe_legacy_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average bytes per packet over the last interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Duplicate the value into both EITR interval fields. */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * More work pending: hand off to the softint (which re-enables
	 * the vector when done); otherwise unmask it right here.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
898
899 /************************************************************************
900 * ixv_msix_mbx
901 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	++adapter->link_irq.ev_count;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change: defer the mailbox work to a softint. */
	if (reg & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	/* Re-enable the "other cause" (mailbox/link) interrupt. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return 1;
} /* ixv_msix_mbx */
924
925 /************************************************************************
926 * ixv_media_status - Media Ioctl callback
927 *
928 * Called whenever the user queries the status of
929 * the interface using ifconfig.
930 ************************************************************************/
931 static void
932 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
933 {
934 struct adapter *adapter = ifp->if_softc;
935
936 INIT_DEBUGOUT("ixv_media_status: begin");
937 IXGBE_CORE_LOCK(adapter);
938 ixv_update_link_status(adapter);
939
940 ifmr->ifm_status = IFM_AVALID;
941 ifmr->ifm_active = IFM_ETHER;
942
943 if (!adapter->link_active) {
944 ifmr->ifm_active |= IFM_NONE;
945 IXGBE_CORE_UNLOCK(adapter);
946 return;
947 }
948
949 ifmr->ifm_status |= IFM_ACTIVE;
950
951 switch (adapter->link_speed) {
952 case IXGBE_LINK_SPEED_10GB_FULL:
953 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
954 break;
955 case IXGBE_LINK_SPEED_1GB_FULL:
956 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
957 break;
958 case IXGBE_LINK_SPEED_100_FULL:
959 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
960 break;
961 case IXGBE_LINK_SPEED_10_FULL:
962 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
963 break;
964 }
965
966 IXGBE_CORE_UNLOCK(adapter);
967
968 return;
969 } /* ixv_media_status */
970
971 /************************************************************************
972 * ixv_media_change - Media Ioctl callback
973 *
974 * Called when the user changes speed/duplex using
975 * media/mediopt option with ifconfig.
976 ************************************************************************/
977 static int
978 ixv_media_change(struct ifnet *ifp)
979 {
980 struct adapter *adapter = ifp->if_softc;
981 struct ifmedia *ifm = &adapter->media;
982
983 INIT_DEBUGOUT("ixv_media_change: begin");
984
985 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
986 return (EINVAL);
987
988 switch (IFM_SUBTYPE(ifm->ifm_media)) {
989 case IFM_AUTO:
990 break;
991 default:
992 device_printf(adapter->dev, "Only auto media type\n");
993 return (EINVAL);
994 }
995
996 return (0);
997 } /* ixv_media_change */
998
999
1000 /************************************************************************
1001 * ixv_negotiate_api
1002 *
1003 * Negotiate the Mailbox API with the PF;
1004 * start with the most featured API first.
1005 ************************************************************************/
1006 static int
1007 ixv_negotiate_api(struct adapter *adapter)
1008 {
1009 struct ixgbe_hw *hw = &adapter->hw;
1010 int mbx_api[] = { ixgbe_mbox_api_11,
1011 ixgbe_mbox_api_10,
1012 ixgbe_mbox_api_unknown };
1013 int i = 0;
1014
1015 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1016 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1017 return (0);
1018 i++;
1019 }
1020
1021 return (EINVAL);
1022 } /* ixv_negotiate_api */
1023
1024
1025 /************************************************************************
1026 * ixv_set_multi - Multicast Update
1027 *
1028 * Called whenever multicast address list is updated.
1029 ************************************************************************/
1030 static void
1031 ixv_set_multi(struct adapter *adapter)
1032 {
1033 struct ether_multi *enm;
1034 struct ether_multistep step;
1035 struct ethercom *ec = &adapter->osdep.ec;
1036 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1037 u8 *update_ptr;
1038 int mcnt = 0;
1039
1040 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1041
1042 ETHER_FIRST_MULTI(step, ec, enm);
1043 while (enm != NULL) {
1044 bcopy(enm->enm_addrlo,
1045 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1046 IXGBE_ETH_LENGTH_OF_ADDRESS);
1047 mcnt++;
1048 /* XXX This might be required --msaitoh */
1049 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1050 break;
1051 ETHER_NEXT_MULTI(step, enm);
1052 }
1053
1054 update_ptr = mta;
1055
1056 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1057 ixv_mc_array_itr, TRUE);
1058
1059 return;
1060 } /* ixv_set_multi */
1061
1062 /************************************************************************
1063 * ixv_mc_array_itr
1064 *
1065 * An iterator function needed by the multicast shared code.
1066 * It feeds the shared code routine the addresses in the
1067 * array of ixv_set_multi() one by one.
1068 ************************************************************************/
1069 static u8 *
1070 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1071 {
1072 u8 *addr = *update_ptr;
1073 u8 *newptr;
1074 *vmdq = 0;
1075
1076 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1077 *update_ptr = newptr;
1078
1079 return addr;
1080 } /* ixv_mc_array_itr */
1081
1082 /************************************************************************
1083 * ixv_local_timer - Timer routine
1084 *
1085 * Checks for link status, updates statistics,
1086 * and runs the watchdog check.
1087 ************************************************************************/
/*
 * Callout entry point: take the core lock and run the locked
 * timer body. Scheduled once per second by ixv_local_timer_locked().
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1097
/*
 * Locked timer body: refreshes link state and statistics, checks each
 * TX queue for hang, and either re-arms the callout or (when every
 * queue is hung) fires the watchdog and re-initializes the interface.
 * Caller must hold the core mutex.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	int hung = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Refresh the cached link state from the PF. */
	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		 * NOTE(review): the hung tests above read que->busy, but
		 * the hung mark below is written to que->txr->busy --
		 * confirm the two counters are intentionally distinct.
		 */
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves to run again in one second. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1164
1165 /************************************************************************
1166 * ixv_update_link_status - Update OS on link state
1167 *
1168 * Note: Only updates the OS on the cached link state.
1169 * The real check of the hardware only happens with
1170 * a link interrupt.
1171 ************************************************************************/
1172 static void
1173 ixv_update_link_status(struct adapter *adapter)
1174 {
1175 struct ifnet *ifp = adapter->ifp;
1176 device_t dev = adapter->dev;
1177
1178 if (adapter->link_up) {
1179 if (adapter->link_active == FALSE) {
1180 if (bootverbose) {
1181 const char *bpsmsg;
1182
1183 switch (adapter->link_speed) {
1184 case IXGBE_LINK_SPEED_10GB_FULL:
1185 bpsmsg = "10 Gbps";
1186 break;
1187 case IXGBE_LINK_SPEED_5GB_FULL:
1188 bpsmsg = "5 Gbps";
1189 break;
1190 case IXGBE_LINK_SPEED_2_5GB_FULL:
1191 bpsmsg = "2.5 Gbps";
1192 break;
1193 case IXGBE_LINK_SPEED_1GB_FULL:
1194 bpsmsg = "1 Gbps";
1195 break;
1196 case IXGBE_LINK_SPEED_100_FULL:
1197 bpsmsg = "100 Mbps";
1198 break;
1199 case IXGBE_LINK_SPEED_10_FULL:
1200 bpsmsg = "10 Mbps";
1201 break;
1202 default:
1203 bpsmsg = "unknown speed";
1204 break;
1205 }
1206 device_printf(dev,"Link is up %s %s \n",
1207 bpsmsg, "Full Duplex");
1208 }
1209 adapter->link_active = TRUE;
1210 if_link_state_change(ifp, LINK_STATE_UP);
1211 }
1212 } else { /* Link down */
1213 if (adapter->link_active == TRUE) {
1214 if (bootverbose)
1215 device_printf(dev,"Link is Down\n");
1216 if_link_state_change(ifp, LINK_STATE_DOWN);
1217 adapter->link_active = FALSE;
1218 }
1219 }
1220
1221 return;
1222 } /* ixv_update_link_status */
1223
1224
1225 /************************************************************************
1226 * ixv_stop - Stop the hardware
1227 *
1228 * Disables all traffic on the adapter by issuing a
1229 * global reset on the MAC and deallocates TX/RX buffers.
1230 ************************************************************************/
/*
 * if_stop handler: wrapper that takes the core lock around ixv_stop().
 * The 'disable' argument supplied by the ifnet framework is not used.
 */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1240
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts before tearing the datapath down. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped after the reset so that the following
	 * stop_adapter call actually executes (presumably it is a no-op
	 * when the flag is already set -- TODO confirm in shared code).
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the one-second local timer. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1268
1269
1270 /************************************************************************
1271 * ixv_allocate_pci_resources
1272 ************************************************************************/
/*
 * Map BAR0 (the device register window) and record the tuneable queue
 * count. Returns 0 on success or ENXIO when the BAR cannot be mapped
 * or has an unexpected type.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Device registers must not be mapped prefetchable:
		 * reads/writes have side effects and must not be
		 * combined or speculated.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 signals "nothing mapped" to detach. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1313
1314 /************************************************************************
1315 * ixv_free_pci_resources
1316 ************************************************************************/
/*
 * Tear down everything ixv_allocate_pci_resources() and the MSI-X setup
 * established: per-queue interrupt handlers, the mailbox handler, the
 * interrupt vectors themselves, and the BAR0 mapping.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/*
		 * NOTE(review): que->res gates the disestablish, but the
		 * handle passed is osdep.ihs[i] -- confirm the two always
		 * track each other.
		 */
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size == 0 means BAR0 was never mapped (see allocate). */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1353
1354 /************************************************************************
1355 * ixv_setup_interface
1356 *
1357 * Setup networking device structure and register an interface.
1358 ************************************************************************/
/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 *   Fills in the ifnet callbacks/flags, attaches the ethernet layer,
 *   advertises checksum/TSO/VLAN capabilities and registers the
 *   (autoselect-only) media list.
 ************************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Allow the stack to call if_start without KERNEL_LOCK. */
	ifp->if_extflags = IFEF_START_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	/* -2 leaves slack below the ring size -- TODO confirm rationale. */
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* if_initialize/if_register bracket the ethernet attach. */
	if_initialize(ifp);
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Capabilities are advertised but none enabled by default. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
} /* ixv_setup_interface */
1440
1441
1442 /************************************************************************
1443 * ixv_initialize_transmit_units - Enable transmit unit.
1444 ************************************************************************/
1445 static void
1446 ixv_initialize_transmit_units(struct adapter *adapter)
1447 {
1448 struct tx_ring *txr = adapter->tx_rings;
1449 struct ixgbe_hw *hw = &adapter->hw;
1450
1451
1452 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1453 u64 tdba = txr->txdma.dma_paddr;
1454 u32 txctrl, txdctl;
1455
1456 /* Set WTHRESH to 8, burst writeback */
1457 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1458 txdctl |= (8 << 16);
1459 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1460
1461 /* Set the HW Tx Head and Tail indices */
1462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1463 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1464
1465 /* Set Tx Tail register */
1466 txr->tail = IXGBE_VFTDT(i);
1467
1468 /* Set Ring parameters */
1469 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1470 (tdba & 0x00000000ffffffffULL));
1471 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1472 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1473 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1474 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1475 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1476 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1477
1478 /* Now enable */
1479 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1480 txdctl |= IXGBE_TXDCTL_ENABLE;
1481 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1482 }
1483
1484 return;
1485 } /* ixv_initialize_transmit_units */
1486
1487
1488 /************************************************************************
1489 * ixv_initialize_rss_mapping
1490 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	/* Obtain the 40-byte RSS key: from the stack or random. */
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the 64-entry redirection table, mapping hash buckets
	 * round-robin onto the configured queues.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate four 8-bit entries, then write one
		 * 32-bit VFRETA register.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/*
	 * Translate the requested hash types into VFMRQC field bits;
	 * the *_EX variants are not supported by this hardware and are
	 * only reported.
	 */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1582
1583
1584 /************************************************************************
1585 * ixv_initialize_receive_units - Setup receive registers and features.
1586 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size class from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* Bit 29 selects RSS behavior for multiqueue -- TODO confirm. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	/*
	 * Per-queue setup.
	 * NOTE(review): some registers are indexed by the loop counter i
	 * and others by rxr->me -- presumably identical here; confirm.
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1715
1716 /************************************************************************
1717 * ixv_setup_vlan_support
1718 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		return;

	/* Enable VLAN tag stripping on every RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		adapter->rx_rings[i].vtag_strip = TRUE;
	}

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; the PF may
			 * be busy, so retry the request up to 5 times.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1772
1773 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1774 /************************************************************************
1775 * ixv_register_vlan
1776 *
1777 * Run via a vlan config EVENT, it enables us to use the
1778 * HW Filter table since we can get the vlan id. This just
1779 * creates the entry in the soft version of the VFTA, init
1780 * will repopulate the real table.
1781 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16            index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Set the bit for this VID in the 32-bit-per-word shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1802
1803 /************************************************************************
1804 * ixv_unregister_vlan
1805 *
1806 * Run via a vlan unconfig EVENT, remove our entry
1807 * in the soft vfta.
1808 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16            index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the bit for this VID in the shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
1829 #endif
1830
1831 /************************************************************************
1832 * ixv_enable_intr
1833 ************************************************************************/
1834 static void
1835 ixv_enable_intr(struct adapter *adapter)
1836 {
1837 struct ixgbe_hw *hw = &adapter->hw;
1838 struct ix_queue *que = adapter->queues;
1839 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1840
1841
1842 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1843
1844 mask = IXGBE_EIMS_ENABLE_MASK;
1845 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1846 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1847
1848 for (int i = 0; i < adapter->num_queues; i++, que++)
1849 ixv_enable_queue(adapter, que->msix);
1850
1851 IXGBE_WRITE_FLUSH(hw);
1852
1853 return;
1854 } /* ixv_enable_intr */
1855
1856 /************************************************************************
1857 * ixv_disable_intr
1858 ************************************************************************/
1859 static void
1860 ixv_disable_intr(struct adapter *adapter)
1861 {
1862 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1863 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1864 IXGBE_WRITE_FLUSH(&adapter->hw);
1865
1866 return;
1867 } /* ixv_disable_intr */
1868
1869 /************************************************************************
1870 * ixv_set_ivar
1871 *
1872 * Setup the correct IVAR register for a particular MSI-X interrupt
1873 * - entry is the register array entry
1874 * - vector is the MSI-X vector for this queue
1875 * - type is RX/TX/MISC
1876 ************************************************************************/
1877 static void
1878 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1879 {
1880 struct ixgbe_hw *hw = &adapter->hw;
1881 u32 ivar, index;
1882
1883 vector |= IXGBE_IVAR_ALLOC_VAL;
1884
1885 if (type == -1) { /* MISC IVAR */
1886 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1887 ivar &= ~0xFF;
1888 ivar |= vector;
1889 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1890 } else { /* RX/TX IVARS */
1891 index = (16 * (entry & 1)) + (8 * type);
1892 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1893 ivar &= ~(0xFF << index);
1894 ivar |= (vector << index);
1895 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1896 }
1897 } /* ixv_set_ivar */
1898
1899 /************************************************************************
1900 * ixv_configure_ivars
1901 ************************************************************************/
1902 static void
1903 ixv_configure_ivars(struct adapter *adapter)
1904 {
1905 struct ix_queue *que = adapter->queues;
1906
1907 for (int i = 0; i < adapter->num_queues; i++, que++) {
1908 /* First the RX queue entry */
1909 ixv_set_ivar(adapter, i, que->msix, 0);
1910 /* ... and the TX */
1911 ixv_set_ivar(adapter, i, que->msix, 1);
1912 /* Set an initial value in EITR */
1913 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1914 IXGBE_EITR_DEFAULT);
1915 }
1916
1917 /* For the mailbox interrupt */
1918 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1919 } /* ixv_configure_ivars */
1920
1921
1922 /************************************************************************
1923 * ixv_save_stats
1924 *
1925 * The VF stats registers never have a truly virgin
1926 * starting point, so this routine tries to make an
1927 * artificial one, marking ground zero on attach as
1928 * it were.
1929 ************************************************************************/
1930 static void
1931 ixv_save_stats(struct adapter *adapter)
1932 {
1933 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1934
1935 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
1936 stats->saved_reset_vfgprc +=
1937 stats->vfgprc.ev_count - stats->base_vfgprc;
1938 stats->saved_reset_vfgptc +=
1939 stats->vfgptc.ev_count - stats->base_vfgptc;
1940 stats->saved_reset_vfgorc +=
1941 stats->vfgorc.ev_count - stats->base_vfgorc;
1942 stats->saved_reset_vfgotc +=
1943 stats->vfgotc.ev_count - stats->base_vfgotc;
1944 stats->saved_reset_vfmprc +=
1945 stats->vfmprc.ev_count - stats->base_vfmprc;
1946 }
1947 } /* ixv_save_stats */
1948
1949 /************************************************************************
1950 * ixv_init_stats
1951 ************************************************************************/
1952 static void
1953 ixv_init_stats(struct adapter *adapter)
1954 {
1955 struct ixgbe_hw *hw = &adapter->hw;
1956
1957 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1958 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1959 adapter->stats.vf.last_vfgorc |=
1960 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1961
1962 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1963 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1964 adapter->stats.vf.last_vfgotc |=
1965 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1966
1967 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1968
1969 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1970 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1971 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1972 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1973 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1974 } /* ixv_init_stats */
1975
/*
 * Fold a 32-bit hardware counter into a 64-bit event counter.
 * When the register value is below the last reading the counter has
 * wrapped, so carry 2^32 into the accumulated count; the low 32 bits
 * always mirror the register. Written as do { } while (0) so the
 * macros expand safely in unbraced if/else bodies; 'hw' must be in
 * scope at the call site.
 */
#define UPDATE_STAT_32(reg, last, count)		\
do {							\
	u32 current = IXGBE_READ_REG(hw, (reg));	\
	if (current < (last))				\
		(count).ev_count += 0x100000000LL;	\
	(last) = current;				\
	(count).ev_count &= 0xFFFFFFFF00000000LL;	\
	(count).ev_count |= current;			\
} while (/*CONSTCOND*/0)

/*
 * Same idea for the 36-bit counters that span an LSB/MSB register
 * pair: carry 2^36 on wrap and keep the low 36 bits from hardware.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)		\
do {							\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < (last))				\
		(count).ev_count += 0x1000000000LL;	\
	(last) = current;				\
	(count).ev_count &= 0xFFFFFFF000000000LL;	\
	(count).ev_count |= current;			\
} while (/*CONSTCOND*/0)
1997
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 *
 *   Folds the current hardware register values into the evcnt(9)
 *   counters via the UPDATE_STAT_* macros (which use the local "hw").
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters: good RX, good TX, multicast RX */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters, split across LSB/MSB registers */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2022
2023 const struct sysctlnode *
2024 ixv_sysctl_instance(struct adapter *adapter)
2025 {
2026 const char *dvname;
2027 struct sysctllog **log;
2028 int rc;
2029 const struct sysctlnode *rnode;
2030
2031 log = &adapter->sysctllog;
2032 dvname = device_xname(adapter->dev);
2033
2034 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2035 0, CTLTYPE_NODE, dvname,
2036 SYSCTL_DESCR("ixv information and settings"),
2037 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2038 goto err;
2039
2040 return rnode;
2041 err:
2042 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2043 return NULL;
2044 }
2045
2046 static void
2047 ixv_add_device_sysctls(struct adapter *adapter)
2048 {
2049 struct sysctllog **log;
2050 const struct sysctlnode *rnode, *cnode;
2051 device_t dev;
2052
2053 dev = adapter->dev;
2054 log = &adapter->sysctllog;
2055
2056 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2057 aprint_error_dev(dev, "could not create sysctl root\n");
2058 return;
2059 }
2060
2061 if (sysctl_createv(log, 0, &rnode, &cnode,
2062 CTLFLAG_READWRITE, CTLTYPE_INT,
2063 "debug", SYSCTL_DESCR("Debug Info"),
2064 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2065 aprint_error_dev(dev, "could not create sysctl\n");
2066
2067 if (sysctl_createv(log, 0, &rnode, &cnode,
2068 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2069 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2070 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2071 aprint_error_dev(dev, "could not create sysctl\n");
2072 }
2073
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 *   Attaches evcnt(9) counters for driver-level events, per-queue
 *   TX/RX statistics (one sysctl node and a set of counters per
 *   queue), and the MAC-level VF statistics.  The counters are read
 *   back via vmstat -e rather than sysctl; only the node structure
 *   is created through sysctl_createv().
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue statistics: "<dev> q<N>" evcnt groups plus a
	 * hw.<dev>.q<N> sysctl node for future per-queue knobs. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Note: rnode is deliberately reused as both parent and
		 * result, descending into the new q<N> node. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/*
		 * NOTE(review): FreeBSD-style SYSCTL_ADD_INT with "ctx"
		 * and "queue_list", neither of which is declared in this
		 * function — an LRO build would not compile; confirm
		 * before enabling LRO on NetBSD.
		 */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
2245
2246 /************************************************************************
2247 * ixv_set_sysctl_value
2248 ************************************************************************/
2249 static void
2250 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2251 const char *description, int *limit, int value)
2252 {
2253 device_t dev = adapter->dev;
2254 struct sysctllog **log;
2255 const struct sysctlnode *rnode, *cnode;
2256
2257 log = &adapter->sysctllog;
2258 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2259 aprint_error_dev(dev, "could not create sysctl root\n");
2260 return;
2261 }
2262 if (sysctl_createv(log, 0, &rnode, &cnode,
2263 CTLFLAG_READWRITE, CTLTYPE_INT,
2264 name, SYSCTL_DESCR(description),
2265 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2266 aprint_error_dev(dev, "could not create sysctl\n");
2267 *limit = value;
2268 } /* ixv_set_sysctl_value */
2269
2270 /************************************************************************
2271 * ixv_print_debug_info
2272 *
2273 * Called only when em_display_debug_stats is enabled.
2274 * Provides a way to take a look at important statistics
2275 * maintained by the driver and hardware.
2276 ************************************************************************/
2277 static void
2278 ixv_print_debug_info(struct adapter *adapter)
2279 {
2280 device_t dev = adapter->dev;
2281 struct ixgbe_hw *hw = &adapter->hw;
2282 struct ix_queue *que = adapter->queues;
2283 struct rx_ring *rxr;
2284 struct tx_ring *txr;
2285 #ifdef LRO
2286 struct lro_ctrl *lro;
2287 #endif /* LRO */
2288
2289 device_printf(dev,"Error Byte Count = %u \n",
2290 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2291
2292 for (int i = 0; i < adapter->num_queues; i++, que++) {
2293 txr = que->txr;
2294 rxr = que->rxr;
2295 #ifdef LRO
2296 lro = &rxr->lro;
2297 #endif /* LRO */
2298 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2299 que->msix, (long)que->irqs.ev_count);
2300 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2301 rxr->me, (long long)rxr->rx_packets.ev_count);
2302 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2303 rxr->me, (long)rxr->rx_bytes.ev_count);
2304 #ifdef LRO
2305 device_printf(dev,"RX(%d) LRO Queued= %lld\n",
2306 rxr->me, (long long)lro->lro_queued);
2307 device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
2308 rxr->me, (long long)lro->lro_flushed);
2309 #endif /* LRO */
2310 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2311 txr->me, (long)txr->total_packets.ev_count);
2312 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2313 txr->me, (long)txr->no_desc_avail.ev_count);
2314 }
2315
2316 device_printf(dev, "MBX IRQ Handled: %lu\n",
2317 (long)adapter->link_irq.ev_count);
2318 } /* ixv_print_debug_info */
2319
2320 /************************************************************************
2321 * ixv_sysctl_debug
2322 ************************************************************************/
2323 static int
2324 ixv_sysctl_debug(SYSCTLFN_ARGS)
2325 {
2326 struct sysctlnode node;
2327 struct adapter *adapter;
2328 int error, result;
2329
2330 node = *rnode;
2331 node.sysctl_data = &result;
2332 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2333
2334 if (error || newp == NULL)
2335 return error;
2336
2337 if (result == 1) {
2338 adapter = (struct adapter *)node.sysctl_data;
2339 ixv_print_debug_info(adapter);
2340 }
2341
2342 return 0;
2343 } /* ixv_sysctl_debug */
2344
2345 /************************************************************************
2346 * ixv_init_device_features
2347 ************************************************************************/
2348 static void
2349 ixv_init_device_features(struct adapter *adapter)
2350 {
2351 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2352 | IXGBE_FEATURE_VF
2353 | IXGBE_FEATURE_RSS
2354 | IXGBE_FEATURE_LEGACY_TX;
2355
2356 /* A tad short on feature flags for VFs, atm. */
2357 switch (adapter->hw.mac.type) {
2358 case ixgbe_mac_82599_vf:
2359 break;
2360 case ixgbe_mac_X540_vf:
2361 break;
2362 case ixgbe_mac_X550_vf:
2363 case ixgbe_mac_X550EM_x_vf:
2364 case ixgbe_mac_X550EM_a_vf:
2365 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2366 break;
2367 default:
2368 break;
2369 }
2370
2371 /* Enabled by default... */
2372 /* Is a virtual function (VF) */
2373 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2374 adapter->feat_en |= IXGBE_FEATURE_VF;
2375 /* Netmap */
2376 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2377 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2378 /* Receive-Side Scaling (RSS) */
2379 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2380 adapter->feat_en |= IXGBE_FEATURE_RSS;
2381 /* Needs advanced context descriptor regardless of offloads req'd */
2382 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2383 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2384
2385 /* Enabled via sysctl... */
2386 /* Legacy (single queue) transmit */
2387 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2388 ixv_enable_legacy_tx)
2389 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2390 } /* ixv_init_device_features */
2391
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 *   Compiled out: would stop the adapter under the core lock at
 *   system shutdown; NetBSD would hook this via pmf(9) rather than
 *   a bus shutdown method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2407
2408 static int
2409 ixv_ifflags_cb(struct ethercom *ec)
2410 {
2411 struct ifnet *ifp = &ec->ec_if;
2412 struct adapter *adapter = ifp->if_softc;
2413 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2414
2415 IXGBE_CORE_LOCK(adapter);
2416
2417 if (change != 0)
2418 adapter->if_flags = ifp->if_flags;
2419
2420 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2421 rc = ENETRESET;
2422
2423 IXGBE_CORE_UNLOCK(adapter);
2424
2425 return rc;
2426 }
2427
2428
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities that must be toggled as one unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only, no dispatch */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Common path: let ether_ioctl do the generic work and
		 * react only when it asks for a reset (ENETRESET). */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* If the interface is down, the reset is deferred to init */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes need a full reinit */
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2509
2510 /************************************************************************
2511 * ixv_init
2512 ************************************************************************/
2513 static int
2514 ixv_init(struct ifnet *ifp)
2515 {
2516 struct adapter *adapter = ifp->if_softc;
2517
2518 IXGBE_CORE_LOCK(adapter);
2519 ixv_init_locked(adapter);
2520 IXGBE_CORE_UNLOCK(adapter);
2521
2522 return 0;
2523 } /* ixv_init */
2524
2525
/************************************************************************
 * ixv_handle_que - deferred (softint) per-queue service routine
 *
 *   Drains completed RX and TX work for one queue, restarts pending
 *   transmission, and either reschedules itself (more RX pending) or
 *   re-enables the queue's MSI-X interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		/* Multiqueue TX path: restart the ring's pcq if backlogged */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* RX not fully drained: requeue ourselves and
			 * leave the interrupt masked until we catch up. */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2564
/************************************************************************
 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates num_queues + 1 vectors: one per TX/RX queue pair plus
 *   one for the mailbox (link) interrupt.  Each queue handler gets a
 *   round-robin CPU affinity and a softint for deferred processing.
 ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, msix_ctrl, rid, vector = 0;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[32];
	const char *intrstr = NULL;
	kcpuset_t *affinity;
	int cpu_id = 0;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* One vector per queue plus the mailbox vector */
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			/* NOTE: handlers established on earlier iterations
			 * are not torn down here; only the intr vectors
			 * are released. */
			pci_intr_release(pc, adapter->osdep.intrs,
			    adapter->osdep.nintrs);
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			kcpuset_destroy(affinity);
			return (ENXIO);
		}
		que->msix = vector;
		/* NOTE(review): (1 << que->msix) is an int shift — would
		 * overflow for msix >= 31; fine while queue counts stay
		 * small, but 1ULL would be safer. */
		adapter->active_queues |= (u64)(1 << que->msix);

		cpu_id = i;
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0)
			aprint_normal(", bound queue %d to cpu %d\n",
			    i, cpu_id % ncpu);
		else
			aprint_normal("\n");

#ifndef IXGBE_LEGACY_TX
		/* Softint for deferred multiqueue transmit starts */
		txr->txr_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);
#endif
		/* Softint that does the bulk of RX/TX cleanup */
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixv_handle_que, que);
		if (que->que_si == NULL) {
			aprint_error_dev(dev,
			    "could not establish software interrupt\n");
		}
	}

	/* and Mailbox */
	cpu_id++;
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the mbx handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		adapter->res = NULL;
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		kcpuset_destroy(affinity);
		return (ENXIO);
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	adapter->vector = vector;
	/* Tasklets for Mailbox */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixv_handle_link, adapter);
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	kcpuset_destroy(affinity);
	return (0);
} /* ixv_allocate_msix */
2700
2701 /************************************************************************
2702 * ixv_configure_interrupts - Setup MSI-X resources
2703 *
2704 * Note: The VF device MUST use MSI-X, there is no fallback.
2705 ************************************************************************/
2706 static int
2707 ixv_configure_interrupts(struct adapter *adapter)
2708 {
2709 device_t dev = adapter->dev;
2710 int want, queues, msgs;
2711
2712 /* Must have at least 2 MSI-X vectors */
2713 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2714 if (msgs < 2) {
2715 aprint_error_dev(dev,"MSIX config error\n");
2716 return (ENXIO);
2717 }
2718 msgs = MIN(msgs, IXG_MAX_NINTR);
2719
2720 /* Figure out a reasonable auto config value */
2721 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2722
2723 if (ixv_num_queues != 0)
2724 queues = ixv_num_queues;
2725 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2726 queues = IXGBE_VF_MAX_TX_QUEUES;
2727
2728 /*
2729 * Want vectors for the queues,
2730 * plus an additional for mailbox.
2731 */
2732 want = queues + 1;
2733 if (msgs >= want)
2734 msgs = want;
2735 else {
2736 aprint_error_dev(dev,
2737 "MSI-X Configuration Problem, "
2738 "%d vectors but %d queues wanted!\n",
2739 msgs, want);
2740 return -1;
2741 }
2742
2743 adapter->msix_mem = (void *)1; /* XXX */
2744 aprint_normal_dev(dev,
2745 "Using MSI-X interrupts with %d vectors\n", msgs);
2746 adapter->num_queues = queues;
2747
2748 return (0);
2749 } /* ixv_configure_interrupts */
2750
2751
2752 /************************************************************************
2753 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2754 *
2755 * Done outside of interrupt context since the driver might sleep
2756 ************************************************************************/
2757 static void
2758 ixv_handle_link(void *context)
2759 {
2760 struct adapter *adapter = context;
2761
2762 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2763 &adapter->link_up, FALSE);
2764 ixv_update_link_status(adapter);
2765 } /* ixv_handle_link */
2766
2767 /************************************************************************
2768 * ixv_check_link - Used in the local timer to poll for link changes
2769 ************************************************************************/
2770 static void
2771 ixv_check_link(struct adapter *adapter)
2772 {
2773 adapter->hw.mac.get_link_status = TRUE;
2774
2775 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2776 &adapter->link_up, FALSE);
2777 ixv_update_link_status(adapter);
2778 } /* ixv_check_link */
2779