ixv.c revision 1.80 1 /*$NetBSD: ixv.c,v 1.80 2018/02/22 08:49:42 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
47 /************************************************************************
48 * Driver version
49 ************************************************************************/
/* Not const: some ports patch the version string at attach time. */
50 char ixv_driver_version[] = "1.5.13-k";
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
/* Subvendor/subdevice of 0 act as wildcards in ixv_lookup(). */
61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
62 {
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */
69 {0, 0, 0, 0, 0}
70 };
71
72 /************************************************************************
73 * Table of branding strings
74 * Indexed by the last field of ixv_vendor_info_array entries.
75 ************************************************************************/
76 static const char *ixv_strings[] = {
77 "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121 static void ixv_eitr_write(struct ix_queue *, uint32_t);
122
123 static void ixv_setup_vlan_support(struct adapter *);
124 #if 0
125 static void ixv_register_vlan(void *, struct ifnet *, u16);
126 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
127 #endif
128
129 static void ixv_add_device_sysctls(struct adapter *);
130 static void ixv_save_stats(struct adapter *);
131 static void ixv_init_stats(struct adapter *);
132 static void ixv_update_stats(struct adapter *);
133 static void ixv_add_stats_sysctls(struct adapter *);
134 static void ixv_set_sysctl_value(struct adapter *, const char *,
135 const char *, int *, int);
136
137 /* The MSI-X Interrupt handlers */
138 static int ixv_msix_que(void *);
139 static int ixv_msix_mbx(void *);
140
141 /* Deferred interrupt tasklets */
142 static void ixv_handle_que(void *);
143 static void ixv_handle_link(void *);
144
145 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
146 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
147
148 /************************************************************************
149 * FreeBSD Device Interface Entry Points
150 ************************************************************************/
151 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
152 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
153 DVF_DETACH_SHUTDOWN);
154
155 #if 0
156 static driver_t ixv_driver = {
157 "ixv", ixv_methods, sizeof(struct adapter),
158 };
159
160 devclass_t ixv_devclass;
161 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
162 MODULE_DEPEND(ixv, pci, 1, 1, 1);
163 MODULE_DEPEND(ixv, ether, 1, 1, 1);
164 #endif
165
166 /*
167 * TUNEABLE PARAMETERS:
168 */
169
170 /* Number of Queues - do not exceed MSI-X vectors - 1 */
171 static int ixv_num_queues = 0;
/*
 * NetBSD has no FreeBSD-style loader tunables; TUNABLE_INT() is
 * deliberately defined to expand to nothing, so the TUNABLE_INT()
 * invocations below are documentation only.
 */
172 #define TUNABLE_INT(__x, __y)
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
174
175 /*
176 * AIM: Adaptive Interrupt Moderation
177 * which means that the interrupt rate
178 * is varied over time based on the
179 * traffic for that interrupt vector
180 */
181 static bool ixv_enable_aim = false;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
183
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
187
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
191
192 /*
193 * Number of TX descriptors per ring,
194 * setting higher than RX as this seems
195 * the better performing choice.
196 */
197 static int ixv_txd = PERFORM_TXD;
198 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
199
200 /* Number of RX descriptors per ring */
201 static int ixv_rxd = PERFORM_RXD;
202 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
203
204 /* Legacy Transmit (single queue) */
205 static int ixv_enable_legacy_tx = 0;
206 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
207
/*
 * Flags for callouts/softints depending on whether the stack runs
 * MP-safe.  NOTE(review): "SOFTINFT" looks like a typo for "SOFTINT",
 * but the name is used as-is elsewhere in this driver, so keep it.
 */
208 #ifdef NET_MPSAFE
209 #define IXGBE_MPSAFE 1
210 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
211 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
212 #else
213 #define IXGBE_CALLOUT_FLAGS 0
214 #define IXGBE_SOFTINFT_FLAGS 0
215 #endif
216
217 #if 0
218 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
219 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
220 #endif
221
222 /************************************************************************
223 * ixv_probe - Device identification routine
224 *
225 * Determines if the driver should be loaded on
226 * adapter based on its PCI vendor/device ID.
227 *
228 * return BUS_PROBE_DEFAULT on success, positive on failure
229 ************************************************************************/
230 static int
231 ixv_probe(device_t dev, cfdata_t cf, void *aux)
232 {
233 #ifdef __HAVE_PCI_MSI_MSIX
234 const struct pci_attach_args *pa = aux;
235
236 return (ixv_lookup(pa) != NULL) ? 1 : 0;
237 #else
238 return 0;
239 #endif
240 } /* ixv_probe */
241
242 static ixgbe_vendor_info_t *
243 ixv_lookup(const struct pci_attach_args *pa)
244 {
245 ixgbe_vendor_info_t *ent;
246 pcireg_t subid;
247
248 INIT_DEBUGOUT("ixv_lookup: begin");
249
250 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
251 return NULL;
252
253 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
254
255 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
256 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
257 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
258 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
260 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
261 (ent->subdevice_id == 0))) {
262 return ent;
263 }
264 }
265
266 return NULL;
267 }
268
269 /************************************************************************
270 * ixv_attach - Device initialization routine
271 *
272 * Called when the driver is being loaded.
273 * Identifies the type of hardware, allocates all resources
274 * and initializes the hardware.
275 *
276 * return 0 on success, positive on failure
277 ************************************************************************/
278 static void
279 ixv_attach(device_t parent, device_t dev, void *aux)
280 {
281 struct adapter *adapter;
282 struct ixgbe_hw *hw;
283 int error = 0;
284 pcireg_t id, subid;
285 ixgbe_vendor_info_t *ent;
286 const struct pci_attach_args *pa = aux;
287 const char *apivstr;
288 const char *str;
289 char buf[256];
290
291 INIT_DEBUGOUT("ixv_attach: begin");
292
293 /*
294 * Make sure BUSMASTER is set, on a VM under
295 * KVM it may not be and will break things.
296 */
297 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
298
299 /* Allocate, clear, and link in our adapter structure */
/* Softc is allocated by autoconf(9); just wire up back-pointers. */
300 adapter = device_private(dev);
301 adapter->dev = dev;
302 adapter->hw.back = adapter;
303 hw = &adapter->hw;
304
305 adapter->init_locked = ixv_init_locked;
306 adapter->stop_locked = ixv_stop;
307
/* Prefer the 64-bit DMA tag when the platform provides one. */
308 adapter->osdep.pc = pa->pa_pc;
309 adapter->osdep.tag = pa->pa_tag;
310 if (pci_dma64_available(pa))
311 adapter->osdep.dmat = pa->pa_dmat64;
312 else
313 adapter->osdep.dmat = pa->pa_dmat;
314 adapter->osdep.attached = false;
315
/* Probe already matched this device, so the lookup cannot fail. */
316 ent = ixv_lookup(pa);
317
318 KASSERT(ent != NULL);
319
320 aprint_normal(": %s, Version - %s\n",
321 ixv_strings[ent->index], ixv_driver_version);
322
323 /* Core Lock Init*/
324 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
325
326 /* Do base PCI setup - map BAR0 */
327 if (ixv_allocate_pci_resources(adapter, pa)) {
328 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
329 error = ENXIO;
330 goto err_out;
331 }
332
333 /* SYSCTL APIs */
334 ixv_add_device_sysctls(adapter);
335
336 /* Set up the timer callout */
337 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
338
339 /* Save off the information about this board */
340 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
341 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
342 hw->vendor_id = PCI_VENDOR(id);
343 hw->device_id = PCI_PRODUCT(id);
344 hw->revision_id =
345 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
346 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
347 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
348
349 /* A subset of set_mac_type */
350 switch (hw->device_id) {
351 case IXGBE_DEV_ID_82599_VF:
352 hw->mac.type = ixgbe_mac_82599_vf;
353 str = "82599 VF";
354 break;
355 case IXGBE_DEV_ID_X540_VF:
356 hw->mac.type = ixgbe_mac_X540_vf;
357 str = "X540 VF";
358 break;
359 case IXGBE_DEV_ID_X550_VF:
360 hw->mac.type = ixgbe_mac_X550_vf;
361 str = "X550 VF";
362 break;
363 case IXGBE_DEV_ID_X550EM_X_VF:
364 hw->mac.type = ixgbe_mac_X550EM_x_vf;
365 str = "X550EM X VF";
366 break;
367 case IXGBE_DEV_ID_X550EM_A_VF:
368 hw->mac.type = ixgbe_mac_X550EM_a_vf;
369 str = "X550EM A VF";
370 break;
371 default:
372 /* Shouldn't get here since probe succeeded */
373 aprint_error_dev(dev, "Unknown device ID!\n");
374 error = ENXIO;
375 goto err_out;
376 break;
377 }
378 aprint_normal_dev(dev, "device %s\n", str);
379
380 ixv_init_device_features(adapter);
381
382 /* Initialize the shared code */
383 error = ixgbe_init_ops_vf(hw);
384 if (error) {
385 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
386 error = EIO;
387 goto err_out;
388 }
389
390 /* Setup the mailbox */
391 ixgbe_init_mbx_params_vf(hw);
392
393 /* Set the right number of segments */
394 adapter->num_segs = IXGBE_82599_SCATTER;
395
396 /* Reset mbox api to 1.0 */
/* Distinguish a hard reset failure from other errors in the log. */
397 error = hw->mac.ops.reset_hw(hw);
398 if (error == IXGBE_ERR_RESET_FAILED)
399 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
400 else if (error)
401 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
402 error);
403 if (error) {
404 error = EIO;
405 goto err_out;
406 }
407
408 error = hw->mac.ops.init_hw(hw);
409 if (error) {
410 aprint_error_dev(dev, "...init_hw() failed!\n");
411 error = EIO;
412 goto err_out;
413 }
414
415 /* Negotiate mailbox API version */
/* Non-fatal: on failure we fall back to whatever the PF granted. */
416 error = ixv_negotiate_api(adapter);
417 if (error)
418 aprint_normal_dev(dev,
419 "MBX API negotiation failed during attach!\n");
420 switch (hw->api_version) {
421 case ixgbe_mbox_api_10:
422 apivstr = "1.0";
423 break;
424 case ixgbe_mbox_api_20:
425 apivstr = "2.0";
426 break;
427 case ixgbe_mbox_api_11:
428 apivstr = "1.1";
429 break;
430 case ixgbe_mbox_api_12:
431 apivstr = "1.2";
432 break;
433 case ixgbe_mbox_api_13:
434 apivstr = "1.3";
435 break;
436 default:
437 apivstr = "unknown";
438 break;
439 }
440 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
441
442 /* If no mac address was assigned, make a random one */
443 if (!ixv_check_ether_addr(hw->mac.addr)) {
444 u8 addr[ETHER_ADDR_LEN];
445 uint64_t rndval = cprng_strong64();
446
/* Clear the multicast bit, set the locally-administered bit. */
447 memcpy(addr, &rndval, sizeof(addr));
448 addr[0] &= 0xFE;
449 addr[0] |= 0x02;
450 bcopy(addr, hw->mac.addr, sizeof(addr));
451 }
452
453 /* Register for VLAN events */
454 #if 0 /* XXX delete after write? */
455 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
456 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
457 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
458 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
459 #endif
460
461 /* Sysctls for limiting the amount of work done in the taskqueues */
462 ixv_set_sysctl_value(adapter, "rx_processing_limit",
463 "max number of rx packets to process",
464 &adapter->rx_process_limit, ixv_rx_process_limit);
465
466 ixv_set_sysctl_value(adapter, "tx_processing_limit",
467 "max number of tx packets to process",
468 &adapter->tx_process_limit, ixv_tx_process_limit);
469
470 /* Do descriptor calc and sanity checks */
/* Ring sizes must keep the descriptor area DBA_ALIGN-aligned. */
471 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
472 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
473 aprint_error_dev(dev, "TXD config issue, using default!\n");
474 adapter->num_tx_desc = DEFAULT_TXD;
475 } else
476 adapter->num_tx_desc = ixv_txd;
477
478 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
479 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
480 aprint_error_dev(dev, "RXD config issue, using default!\n");
481 adapter->num_rx_desc = DEFAULT_RXD;
482 } else
483 adapter->num_rx_desc = ixv_rxd;
484
485 /* Setup MSI-X */
486 error = ixv_configure_interrupts(adapter);
487 if (error)
488 goto err_out;
489
490 /* Allocate our TX/RX Queues */
491 if (ixgbe_allocate_queues(adapter)) {
492 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
493 error = ENOMEM;
494 goto err_out;
495 }
496
497 /* hw.ix defaults init */
498 adapter->enable_aim = ixv_enable_aim;
499
500 error = ixv_allocate_msix(adapter, pa);
501 if (error) {
502 device_printf(dev, "ixv_allocate_msix() failed!\n");
503 goto err_late;
504 }
505
506 /* Setup OS specific network interface */
507 error = ixv_setup_interface(dev, adapter);
508 if (error != 0) {
509 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
510 goto err_late;
511 }
512
513 /* Do the stats setup */
514 ixv_save_stats(adapter);
515 ixv_init_stats(adapter);
516 ixv_add_stats_sysctls(adapter);
517
518 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
519 ixgbe_netmap_attach(adapter);
520
521 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
522 aprint_verbose_dev(dev, "feature cap %s\n", buf);
523 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
524 aprint_verbose_dev(dev, "feature ena %s\n", buf);
525
/* Mark attach complete so ixv_detach() knows there is work to undo. */
526 INIT_DEBUGOUT("ixv_attach: end");
527 adapter->osdep.attached = true;
528
529 return;
530
/*
 * Error unwind: err_late frees the queue structures allocated by
 * ixgbe_allocate_queues(); err_out releases PCI resources and the
 * core lock.  autoconf attach cannot report failure, so we only log.
 */
531 err_late:
532 ixgbe_free_transmit_structures(adapter);
533 ixgbe_free_receive_structures(adapter);
534 free(adapter->queues, M_DEVBUF);
535 err_out:
536 ixv_free_pci_resources(adapter);
537 IXGBE_CORE_LOCK_DESTROY(adapter);
538
539 return;
540 } /* ixv_attach */
541
542 /************************************************************************
543 * ixv_detach - Device removal routine
544 *
545 * Called when the driver is being removed.
546 * Stops the adapter and deallocates all the resources
547 * that were allocated for driver operation.
548 *
549 * return 0 on success, positive on failure
550 ************************************************************************/
551 static int
552 ixv_detach(device_t dev, int flags)
553 {
554 struct adapter *adapter = device_private(dev);
555 struct ixgbe_hw *hw = &adapter->hw;
/* que/txr/rxr are walked in parallel with the queue index below. */
556 struct ix_queue *que = adapter->queues;
557 struct tx_ring *txr = adapter->tx_rings;
558 struct rx_ring *rxr = adapter->rx_rings;
559 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
560
/* Nothing to undo if attach bailed out early. */
561 INIT_DEBUGOUT("ixv_detach: begin");
562 if (adapter->osdep.attached == false)
563 return 0;
564
565 /* Stop the interface. Callouts are stopped in it. */
566 ixv_ifstop(adapter->ifp, 1);
567
568 #if NVLAN > 0
569 /* Make sure VLANs are not using driver */
/* Forced/shutdown detach tears VLANs down; otherwise refuse. */
570 if (!VLAN_ATTACHED(&adapter->osdep.ec))
571 ; /* nothing to do: no VLANs */
572 else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
573 vlan_ifdetach(adapter->ifp);
574 else {
575 aprint_error_dev(dev, "VLANs in use, detach first\n");
576 return EBUSY;
577 }
578 #endif
579
580 IXGBE_CORE_LOCK(adapter);
581 ixv_stop(adapter);
582 IXGBE_CORE_UNLOCK(adapter);
583
/* Tear down the per-queue deferred-work softints. */
584 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
585 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
586 softint_disestablish(txr->txr_si);
587 softint_disestablish(que->que_si);
588 }
589
590 /* Drain the Mailbox(link) queue */
591 softint_disestablish(adapter->link_si);
592
593 /* Unregister VLAN events */
594 #if 0 /* XXX msaitoh delete after write? */
595 if (adapter->vlan_attach != NULL)
596 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
597 if (adapter->vlan_detach != NULL)
598 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
599 #endif
600
601 ether_ifdetach(adapter->ifp);
602 callout_halt(&adapter->timer, NULL);
603
604 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
605 netmap_detach(adapter->ifp);
606
607 ixv_free_pci_resources(adapter);
608 #if 0 /* XXX the NetBSD port is probably missing something here */
609 bus_generic_detach(dev);
610 #endif
611 if_detach(adapter->ifp);
612 if_percpuq_destroy(adapter->ipq);
613
/* Detach every event counter registered by ixv_add_stats_sysctls(). */
614 sysctl_teardown(&adapter->sysctllog);
615 evcnt_detach(&adapter->handleq);
616 evcnt_detach(&adapter->req);
617 evcnt_detach(&adapter->efbig_tx_dma_setup);
618 evcnt_detach(&adapter->mbuf_defrag_failed);
619 evcnt_detach(&adapter->efbig2_tx_dma_setup);
620 evcnt_detach(&adapter->einval_tx_dma_setup);
621 evcnt_detach(&adapter->other_tx_dma_setup);
622 evcnt_detach(&adapter->eagain_tx_dma_setup);
623 evcnt_detach(&adapter->enomem_tx_dma_setup);
624 evcnt_detach(&adapter->watchdog_events);
625 evcnt_detach(&adapter->tso_err);
626 evcnt_detach(&adapter->link_irq);
627
/* txr was advanced by the softint loop above; rewind for this pass. */
628 txr = adapter->tx_rings;
629 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
630 evcnt_detach(&adapter->queues[i].irqs);
631 evcnt_detach(&txr->no_desc_avail);
632 evcnt_detach(&txr->total_packets);
633 evcnt_detach(&txr->tso_tx);
634 #ifndef IXGBE_LEGACY_TX
635 evcnt_detach(&txr->pcq_drops);
636 #endif
637
638 evcnt_detach(&rxr->rx_packets);
639 evcnt_detach(&rxr->rx_bytes);
640 evcnt_detach(&rxr->rx_copies);
641 evcnt_detach(&rxr->no_jmbuf);
642 evcnt_detach(&rxr->rx_discarded);
643 }
644 evcnt_detach(&stats->ipcs);
645 evcnt_detach(&stats->l4cs);
646 evcnt_detach(&stats->ipcs_bad);
647 evcnt_detach(&stats->l4cs_bad);
648
649 /* Packet Reception Stats */
650 evcnt_detach(&stats->vfgorc);
651 evcnt_detach(&stats->vfgprc);
652 evcnt_detach(&stats->vfmprc);
653
654 /* Packet Transmission Stats */
655 evcnt_detach(&stats->vfgotc);
656 evcnt_detach(&stats->vfgptc);
657
658 /* Mailbox Stats */
659 evcnt_detach(&hw->mbx.stats.msgs_tx);
660 evcnt_detach(&hw->mbx.stats.msgs_rx);
661 evcnt_detach(&hw->mbx.stats.acks);
662 evcnt_detach(&hw->mbx.stats.reqs);
663 evcnt_detach(&hw->mbx.stats.rsts);
664
665 ixgbe_free_transmit_structures(adapter);
666 ixgbe_free_receive_structures(adapter);
667 free(adapter->queues, M_DEVBUF);
668
669 IXGBE_CORE_LOCK_DESTROY(adapter);
670
671 return (0);
672 } /* ixv_detach */
673
674 /************************************************************************
675 * ixv_init_locked - Init entry point
676 *
677 * Used in two ways: It is used by the stack as an init entry
678 * point in network interface structure. It is also used
679 * by the driver as a hw/sw initialization routine to get
680 * to a consistent state.
681 *
682 * return 0 on success, positive on failure
683 ************************************************************************/
684 static void
685 ixv_init_locked(struct adapter *adapter)
686 {
687 struct ifnet *ifp = adapter->ifp;
688 device_t dev = adapter->dev;
689 struct ixgbe_hw *hw = &adapter->hw;
690 struct ix_queue *que = adapter->queues;
691 int error = 0;
692 uint32_t mask;
693 int i;
694
/* Caller must hold the core lock; quiesce the MAC before reconfig. */
695 INIT_DEBUGOUT("ixv_init_locked: begin");
696 KASSERT(mutex_owned(&adapter->core_mtx));
697 hw->adapter_stopped = FALSE;
698 hw->mac.ops.stop_adapter(hw);
699 callout_stop(&adapter->timer);
700
701 /* reprogram the RAR[0] in case user changed it. */
702 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
703
704 /* Get the latest mac address, User can use a LAA */
705 memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
706 IXGBE_ETH_LENGTH_OF_ADDRESS);
707 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
708
709 /* Prepare transmit descriptors and buffers */
/* On any setup failure, leave the interface fully stopped. */
710 if (ixgbe_setup_transmit_structures(adapter)) {
711 aprint_error_dev(dev, "Could not setup transmit structures\n");
712 ixv_stop(adapter);
713 return;
714 }
715
716 /* Reset VF and renegotiate mailbox API version */
717 hw->mac.ops.reset_hw(hw);
718 error = ixv_negotiate_api(adapter);
719 if (error)
720 device_printf(dev,
721 "Mailbox API negotiation failed in init_locked!\n");
722
723 ixv_initialize_transmit_units(adapter);
724
725 /* Setup Multicast table */
726 ixv_set_multi(adapter);
727
728 /*
729 * Determine the correct mbuf pool
730 * for doing jumbo/headersplit
731 */
732 if (ifp->if_mtu > ETHERMTU)
733 adapter->rx_mbuf_sz = MJUMPAGESIZE;
734 else
735 adapter->rx_mbuf_sz = MCLBYTES;
736
737 /* Prepare receive descriptors and buffers */
738 if (ixgbe_setup_receive_structures(adapter)) {
739 device_printf(dev, "Could not setup receive structures\n");
740 ixv_stop(adapter);
741 return;
742 }
743
744 /* Configure RX settings */
745 ixv_initialize_receive_units(adapter);
746
747 #if 0 /* XXX isn't it required? -- msaitoh */
748 /* Set the various hardware offload abilities */
749 ifp->if_hwassist = 0;
750 if (ifp->if_capenable & IFCAP_TSO4)
751 ifp->if_hwassist |= CSUM_TSO;
752 if (ifp->if_capenable & IFCAP_TXCSUM) {
753 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
754 #if __FreeBSD_version >= 800000
755 ifp->if_hwassist |= CSUM_SCTP;
756 #endif
757 }
758 #endif
759
760 /* Set up VLAN offload and filter */
761 ixv_setup_vlan_support(adapter);
762
763 /* Set up MSI-X routing */
764 ixv_configure_ivars(adapter);
765
766 /* Set up auto-mask */
/*
 * Auto-mask covers the mailbox vector plus every queue vector.
 * NOTE(review): vectors are assumed < 32 here (1 << v in int).
 */
767 mask = (1 << adapter->vector);
768 for (i = 0; i < adapter->num_queues; i++, que++)
769 mask |= (1 << que->msix);
770 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
771
772 /* Set moderation on the Link interrupt */
773 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
774
775 /* Stats init */
776 ixv_init_stats(adapter);
777
778 /* Config/Enable Link */
779 hw->mac.get_link_status = TRUE;
780 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
781 FALSE);
782
783 /* Start watchdog */
784 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
785
786 /* And now turn on interrupts */
787 ixv_enable_intr(adapter);
788
789 /* Update saved flags. See ixgbe_ifflags_cb() */
790 adapter->if_flags = ifp->if_flags;
791
792 /* Now inform the stack we're ready */
793 ifp->if_flags |= IFF_RUNNING;
794 ifp->if_flags &= ~IFF_OACTIVE;
795
796 return;
797 } /* ixv_init_locked */
798
799 /*
800 * MSI-X Interrupt Handlers and Tasklets
801 */
802
803 static inline void
804 ixv_enable_queue(struct adapter *adapter, u32 vector)
805 {
806 struct ixgbe_hw *hw = &adapter->hw;
807 u32 queue = 1 << vector;
808 u32 mask;
809
810 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
811 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
812 } /* ixv_enable_queue */
813
814 static inline void
815 ixv_disable_queue(struct adapter *adapter, u32 vector)
816 {
817 struct ixgbe_hw *hw = &adapter->hw;
818 u64 queue = (u64)(1 << vector);
819 u32 mask;
820
821 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
822 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
823 } /* ixv_disable_queue */
824
825 static inline void
826 ixv_rearm_queues(struct adapter *adapter, u64 queues)
827 {
828 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
829 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
830 } /* ixv_rearm_queues */
831
832
833 /************************************************************************
834 * ixv_msix_que - MSI Queue Interrupt Service routine
835 ************************************************************************/
836 static int
837 ixv_msix_que(void *arg)
838 {
839 struct ix_queue *que = arg;
840 struct adapter *adapter = que->adapter;
841 struct tx_ring *txr = que->txr;
842 struct rx_ring *rxr = que->rxr;
843 bool more;
844 u32 newitr = 0;
845
/* Mask this vector while the queue is being serviced. */
846 ixv_disable_queue(adapter, que->msix);
847 ++que->irqs.ev_count;
848
849 #ifdef __NetBSD__
850 /* Don't run ixgbe_rxeof in interrupt context */
/* Force the softint path below by pretending there is more RX work. */
851 more = true;
852 #else
853 more = ixgbe_rxeof(que);
854 #endif
855
856 IXGBE_TX_LOCK(txr);
857 ixgbe_txeof(txr);
858 IXGBE_TX_UNLOCK(txr);
859
860 /* Do AIM now? */
861
862 if (adapter->enable_aim == false)
863 goto no_calc;
864 /*
865 * Do Adaptive Interrupt Moderation:
866 * - Write out last calculated setting
867 * - Calculate based on average size over
868 * the last interval.
869 */
870 if (que->eitr_setting)
871 ixv_eitr_write(que, que->eitr_setting);
872
873 que->eitr_setting = 0;
874
875 /* Idle, do nothing */
876 if ((txr->bytes == 0) && (rxr->bytes == 0))
877 goto no_calc;
878
/* Average frame size since the last interrupt (larger of TX/RX). */
879 if ((txr->bytes) && (txr->packets))
880 newitr = txr->bytes/txr->packets;
881 if ((rxr->bytes) && (rxr->packets))
882 newitr = max(newitr, (rxr->bytes / rxr->packets));
883 newitr += 24; /* account for hardware frame, crc */
884
885 /* set an upper boundary */
886 newitr = min(newitr, 3000);
887
888 /* Be nice to the mid range */
889 if ((newitr > 300) && (newitr < 1200))
890 newitr = (newitr / 3);
891 else
892 newitr = (newitr / 2);
893
894 /*
895 * When RSC is used, ITR interval must be larger than RSC_DELAY.
896 * Currently, we use 2us for RSC_DELAY. The minimum value is always
897 * greater than 2us on 100M (and 10M?(not documented)), but it's not
898 * on 1G and higher.
899 */
900 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
901 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
902 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
903 newitr = IXGBE_MIN_RSC_EITR_10G1G;
904 }
905
906 /* save for next interrupt */
907 que->eitr_setting = newitr;
908
909 /* Reset state */
910 txr->bytes = 0;
911 txr->packets = 0;
912 rxr->bytes = 0;
913 rxr->packets = 0;
914
/*
 * If there is RX work left, defer it to the softint (which will
 * re-enable the vector when done); otherwise unmask immediately.
 */
915 no_calc:
916 if (more)
917 softint_schedule(que->que_si);
918 else /* Re-enable this interrupt */
919 ixv_enable_queue(adapter, que->msix);
920
921 return 1;
922 } /* ixv_msix_que */
923
924 /************************************************************************
925 * ixv_msix_mbx
926 ************************************************************************/
927 static int
928 ixv_msix_mbx(void *arg)
929 {
930 struct adapter *adapter = arg;
931 struct ixgbe_hw *hw = &adapter->hw;
932
933 ++adapter->link_irq.ev_count;
934 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
935
936 /* Link status change */
/* Defer the actual link handling to the link softint. */
937 hw->mac.get_link_status = TRUE;
938 softint_schedule(adapter->link_si);
939
/* Re-enable (unmask) the mailbox/link vector in VTEIMS. */
940 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
941
942 return 1;
943 } /* ixv_msix_mbx */
944
945 static void
946 ixv_eitr_write(struct ix_queue *que, uint32_t itr)
947 {
948 struct adapter *adapter = que->adapter;
949
950 /*
951 * Newer devices than 82598 have VF function, so this function is
952 * simple.
953 */
954 itr |= IXGBE_EITR_CNT_WDIS;
955
956 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), itr);
957 }
958
959
960 /************************************************************************
961 * ixv_media_status - Media Ioctl callback
962 *
963 * Called whenever the user queries the status of
964 * the interface using ifconfig.
965 ************************************************************************/
966 static void
967 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
968 {
969 struct adapter *adapter = ifp->if_softc;
970
/* Refresh link state under the core lock before reporting. */
971 INIT_DEBUGOUT("ixv_media_status: begin");
972 IXGBE_CORE_LOCK(adapter);
973 ixv_update_link_status(adapter);
974
975 ifmr->ifm_status = IFM_AVALID;
976 ifmr->ifm_active = IFM_ETHER;
977
/* No link: report IFM_NONE and bail (early-return drops the lock). */
978 if (!adapter->link_active) {
979 ifmr->ifm_active |= IFM_NONE;
980 IXGBE_CORE_UNLOCK(adapter);
981 return;
982 }
983
984 ifmr->ifm_status |= IFM_ACTIVE;
985
/* Map the negotiated link speed to an ifmedia word (always FDX). */
986 switch (adapter->link_speed) {
987 case IXGBE_LINK_SPEED_10GB_FULL:
988 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
989 break;
990 case IXGBE_LINK_SPEED_5GB_FULL:
991 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
992 break;
993 case IXGBE_LINK_SPEED_2_5GB_FULL:
994 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
995 break;
996 case IXGBE_LINK_SPEED_1GB_FULL:
997 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
998 break;
999 case IXGBE_LINK_SPEED_100_FULL:
1000 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1001 break;
1002 case IXGBE_LINK_SPEED_10_FULL:
1003 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1004 break;
1005 }
1006
1007 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1008
1009 IXGBE_CORE_UNLOCK(adapter);
1010
1011 return;
1012 } /* ixv_media_status */
1013
1014 /************************************************************************
1015 * ixv_media_change - Media Ioctl callback
1016 *
1017 * Called when the user changes speed/duplex using
1018 * media/mediopt option with ifconfig.
1019 ************************************************************************/
1020 static int
1021 ixv_media_change(struct ifnet *ifp)
1022 {
1023 struct adapter *adapter = ifp->if_softc;
1024 struct ifmedia *ifm = &adapter->media;
1025
1026 INIT_DEBUGOUT("ixv_media_change: begin");
1027
1028 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1029 return (EINVAL);
1030
1031 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1032 case IFM_AUTO:
1033 break;
1034 default:
1035 device_printf(adapter->dev, "Only auto media type\n");
1036 return (EINVAL);
1037 }
1038
1039 return (0);
1040 } /* ixv_media_change */
1041
1042
1043 /************************************************************************
1044 * ixv_negotiate_api
1045 *
1046 * Negotiate the Mailbox API with the PF;
1047 * start with the most featured API first.
1048 ************************************************************************/
1049 static int
1050 ixv_negotiate_api(struct adapter *adapter)
1051 {
1052 struct ixgbe_hw *hw = &adapter->hw;
1053 int mbx_api[] = { ixgbe_mbox_api_11,
1054 ixgbe_mbox_api_10,
1055 ixgbe_mbox_api_unknown };
1056 int i = 0;
1057
1058 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1059 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1060 return (0);
1061 i++;
1062 }
1063
1064 return (EINVAL);
1065 } /* ixv_negotiate_api */
1066
1067
1068 /************************************************************************
1069 * ixv_set_multi - Multicast Update
1070 *
1071 * Called whenever multicast address list is updated.
1072 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/* Flatten the ethercom multicast list into a contiguous array. */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	update_ptr = mta;

	/*
	 * Hand the array to the shared code; ixv_mc_array_itr feeds it
	 * the entries one at a time via update_ptr.
	 * NOTE(review): entries beyond MAX_NUM_MULTICAST_ADDRESSES are
	 * silently dropped; no all-multi fallback is attempted here.
	 */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);

	return;
} /* ixv_set_multi */
1107
1108 /************************************************************************
1109 * ixv_mc_array_itr
1110 *
1111 * An iterator function needed by the multicast shared code.
1112 * It feeds the shared code routine the addresses in the
1113 * array of ixv_set_multi() one by one.
1114 ************************************************************************/
1115 static u8 *
1116 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1117 {
1118 u8 *addr = *update_ptr;
1119 u8 *newptr;
1120 *vmdq = 0;
1121
1122 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1123 *update_ptr = newptr;
1124
1125 return addr;
1126 } /* ixv_mc_array_itr */
1127
1128 /************************************************************************
1129 * ixv_local_timer - Timer routine
1130 *
1131 * Checks for link status, updates statistics,
1132 * and runs the watchdog check.
1133 ************************************************************************/
/* Callout entry point: acquire the core lock and run the locked body. */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1143
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;	/* bitmask of queues with pending TX work */
	int hung = 0;	/* number of queues currently considered hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Refresh link state via the mailbox. */
	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 * - mark hung queues so we don't schedule on them
	 * - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		 * NOTE(review): the hung test above reads que->busy, but
		 * the mark below writes que->txr->busy -- confirm the two
		 * fields are intentionally distinct (same pattern exists
		 * in the PF driver).
		 */
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves to run again in one second. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* Every queue is hung: log, count the event, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1210
1211 /************************************************************************
1212 * ixv_update_link_status - Update OS on link state
1213 *
1214 * Note: Only updates the OS on the cached link state.
1215 * The real check of the hardware only happens with
1216 * a link interrupt.
1217 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	/*
	 * Compare the cached hardware state (link_up/link_speed) against
	 * the driver's notion (link_active) and notify the stack only on
	 * transitions.
	 */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose) {
				const char *bpsmsg;

				/* Human-readable speed for the log line. */
				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
} /* ixv_update_link_status */
1269
1270
1271 /************************************************************************
1272 * ixv_stop - Stop the hardware
1273 *
1274 * Disables all traffic on the adapter by issuing a
1275 * global reset on the MAC and deallocates TX/RX buffers.
1276 ************************************************************************/
/* if_stop handler: stop the adapter under the core lock. */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1286
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts first so nothing fires during teardown. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped so the stop_adapter call below is not
	 * short-circuited.  NOTE(review): presumably reset_hw leaves the
	 * flag set -- confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1314
1315
1316 /************************************************************************
1317 * ixv_allocate_pci_resources
1318 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* BAR0 holds the memory-mapped register window. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			/* Register windows must not be mapped prefetchable. */
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/*
			 * Shared error exit: both the pci_mapreg_info()
			 * failure above (via goto) and a bus_space_map()
			 * failure land here.
			 */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1359
1360 /************************************************************************
1361 * ixv_free_pci_resources
1362 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;	/* MSI-X vector index of the mailbox interrupt */

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}

	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Give back the allocated interrupt vectors themselves. */
	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* Unmap BAR0 if ixv_allocate_pci_resources() mapped it. */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1399
1400 /************************************************************************
1401 * ixv_setup_interface
1402 *
1403 * Setup networking device structure and register an interface.
1404 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* The ifnet is embedded in the ethercom; wire up the callbacks. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit unless legacy TX was requested. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	    | IFCAP_TSOv4
	    | IFCAP_TSOv6;
	/* Capabilities advertised, but nothing enabled by default here. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWCSUM
	    | ETHERCAP_JUMBO_MTU
	    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1491
1492
1493 /************************************************************************
1494 * ixv_initialize_transmit_units - Enable transmit unit.
1495 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Program each TX queue's descriptor ring registers. */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);	/* WTHRESH field is bits 16-22 */
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed write ordering of descriptor writebacks. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1537
1538
1539 /************************************************************************
1540 * ixv_initialize_rss_mapping
1541 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, distributed
	 * round-robin over the queues (j cycles through them).
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Four entries are packed per 32-bit VFRETA register,
		 * flushed every fourth iteration.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the configured hash types into MRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1635
1636
1637 /************************************************************************
1638 * ixv_initialize_receive_units - Setup receive registers and features.
1639 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size (in 1KB SRRCTL units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Packet-split header types (required setting for the VF). */
	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 appears to be the RSS-enable-related field
	 * of VFPSRTYPE when multiple queues are in use -- confirm against
	 * the datasheet. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (up to ~10ms) for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/*
	 * NOTE(review): RXCSUM is a PF-owned register; verify the VF is
	 * actually permitted to read/modify it here.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1768
1769 /************************************************************************
1770 * ixv_setup_vlan_support
1771 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable VLAN stripping on each RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Retry the mailbox op a few times before giving up. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1833
1834 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1835 /************************************************************************
1836 * ixv_register_vlan
1837 *
1838 * Run via a vlan config EVENT, it enables us to use the
1839 * HW Filter table since we can get the vlan id. This just
1840 * creates the entry in the soft version of the VFTA, init
1841 * will repopulate the real table.
1842 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Set the bit for this VID in the shadow table (32 VIDs/word). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1863
1864 /************************************************************************
1865 * ixv_unregister_vlan
1866 *
1867 * Run via a vlan unconfig EVENT, remove our entry
1868 * in the soft vfta.
1869 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the bit for this VID in the shadow table (32 VIDs/word). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
1890 #endif
1891
1892 /************************************************************************
1893 * ixv_enable_intr
1894 ************************************************************************/
1895 static void
1896 ixv_enable_intr(struct adapter *adapter)
1897 {
1898 struct ixgbe_hw *hw = &adapter->hw;
1899 struct ix_queue *que = adapter->queues;
1900 u32 mask;
1901 int i;
1902
1903 /* For VTEIAC */
1904 mask = (1 << adapter->vector);
1905 for (i = 0; i < adapter->num_queues; i++, que++)
1906 mask |= (1 << que->msix);
1907 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1908
1909 /* For VTEIMS */
1910 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
1911 que = adapter->queues;
1912 for (i = 0; i < adapter->num_queues; i++, que++)
1913 ixv_enable_queue(adapter, que->msix);
1914
1915 IXGBE_WRITE_FLUSH(hw);
1916
1917 return;
1918 } /* ixv_enable_intr */
1919
1920 /************************************************************************
1921 * ixv_disable_intr
1922 ************************************************************************/
1923 static void
1924 ixv_disable_intr(struct adapter *adapter)
1925 {
1926 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1927 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1928 IXGBE_WRITE_FLUSH(&adapter->hw);
1929
1930 return;
1931 } /* ixv_disable_intr */
1932
1933 /************************************************************************
1934 * ixv_set_ivar
1935 *
1936 * Setup the correct IVAR register for a particular MSI-X interrupt
1937 * - entry is the register array entry
1938 * - vector is the MSI-X vector for this queue
1939 * - type is RX/TX/MISC
1940 ************************************************************************/
1941 static void
1942 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1943 {
1944 struct ixgbe_hw *hw = &adapter->hw;
1945 u32 ivar, index;
1946
1947 vector |= IXGBE_IVAR_ALLOC_VAL;
1948
1949 if (type == -1) { /* MISC IVAR */
1950 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1951 ivar &= ~0xFF;
1952 ivar |= vector;
1953 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1954 } else { /* RX/TX IVARS */
1955 index = (16 * (entry & 1)) + (8 * type);
1956 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1957 ivar &= ~(0xFF << index);
1958 ivar |= (vector << index);
1959 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1960 }
1961 } /* ixv_set_ivar */
1962
1963 /************************************************************************
1964 * ixv_configure_ivars
1965 ************************************************************************/
1966 static void
1967 ixv_configure_ivars(struct adapter *adapter)
1968 {
1969 struct ix_queue *que = adapter->queues;
1970
1971 /* XXX We should sync EITR value calculation with ixgbe.c? */
1972
1973 for (int i = 0; i < adapter->num_queues; i++, que++) {
1974 /* First the RX queue entry */
1975 ixv_set_ivar(adapter, i, que->msix, 0);
1976 /* ... and the TX */
1977 ixv_set_ivar(adapter, i, que->msix, 1);
1978 /* Set an initial value in EITR */
1979 ixv_eitr_write(que, IXGBE_EITR_DEFAULT);
1980 }
1981
1982 /* For the mailbox interrupt */
1983 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1984 } /* ixv_configure_ivars */
1985
1986
1987 /************************************************************************
1988 * ixv_save_stats
1989 *
1990 * The VF stats registers never have a truly virgin
1991 * starting point, so this routine tries to make an
1992 * artificial one, marking ground zero on attach as
1993 * it were.
1994 ************************************************************************/
static void
ixv_save_stats(struct adapter *adapter)
{
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/*
	 * Fold the deltas accumulated since the last baseline into the
	 * saved_reset_* totals so counts survive a VF reset (which
	 * clears the hardware counters).  Skipped when nothing has been
	 * received or transmitted yet.
	 */
	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
		stats->saved_reset_vfgprc +=
		    stats->vfgprc.ev_count - stats->base_vfgprc;
		stats->saved_reset_vfgptc +=
		    stats->vfgptc.ev_count - stats->base_vfgptc;
		stats->saved_reset_vfgorc +=
		    stats->vfgorc.ev_count - stats->base_vfgorc;
		stats->saved_reset_vfgotc +=
		    stats->vfgotc.ev_count - stats->base_vfgotc;
		stats->saved_reset_vfmprc +=
		    stats->vfmprc.ev_count - stats->base_vfmprc;
	}
} /* ixv_save_stats */
2013
2014 /************************************************************************
2015 * ixv_init_stats
2016 ************************************************************************/
2017 static void
2018 ixv_init_stats(struct adapter *adapter)
2019 {
2020 struct ixgbe_hw *hw = &adapter->hw;
2021
2022 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2023 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2024 adapter->stats.vf.last_vfgorc |=
2025 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2026
2027 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2028 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2029 adapter->stats.vf.last_vfgotc |=
2030 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2031
2032 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2033
2034 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2035 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2036 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2037 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2038 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2039 } /* ixv_init_stats */
2040
/*
 * Fold a 32-bit hardware counter into a 64-bit event counter,
 * detecting register rollover.  'hw' is picked up from the caller's
 * scope.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement inside unbraced if/else; arguments parenthesized
 * against operator-precedence surprises.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2050
/*
 * Same as UPDATE_STAT_32 but for the 36-bit octet counters that are
 * split across an LSB and an MSB register; rollover adds 2^36 and the
 * high bits above bit 35 are preserved in the event counter.
 * do { } while (0) and argument parenthesization for macro hygiene.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2062
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 *
 *   Reads the current VF hardware counters and folds them into the
 *   driver's evcnt counters via the UPDATE_STAT_* macros (which
 *   reference the local 'hw' and handle 32/36-bit rollover).
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters, split across LSB/MSB registers */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2087
2088 const struct sysctlnode *
2089 ixv_sysctl_instance(struct adapter *adapter)
2090 {
2091 const char *dvname;
2092 struct sysctllog **log;
2093 int rc;
2094 const struct sysctlnode *rnode;
2095
2096 log = &adapter->sysctllog;
2097 dvname = device_xname(adapter->dev);
2098
2099 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2100 0, CTLTYPE_NODE, dvname,
2101 SYSCTL_DESCR("ixv information and settings"),
2102 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2103 goto err;
2104
2105 return rnode;
2106 err:
2107 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2108 return NULL;
2109 }
2110
2111 static void
2112 ixv_add_device_sysctls(struct adapter *adapter)
2113 {
2114 struct sysctllog **log;
2115 const struct sysctlnode *rnode, *cnode;
2116 device_t dev;
2117
2118 dev = adapter->dev;
2119 log = &adapter->sysctllog;
2120
2121 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2122 aprint_error_dev(dev, "could not create sysctl root\n");
2123 return;
2124 }
2125
2126 if (sysctl_createv(log, 0, &rnode, &cnode,
2127 CTLFLAG_READWRITE, CTLTYPE_INT,
2128 "debug", SYSCTL_DESCR("Debug Info"),
2129 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2130 aprint_error_dev(dev, "could not create sysctl\n");
2131
2132 if (sysctl_createv(log, 0, &rnode, &cnode,
2133 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2134 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2135 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2136 aprint_error_dev(dev, "could not create sysctl\n");
2137 }
2138
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 *   Attaches evcnt(9) counters for driver-level soft errors, the
 *   per-queue TX/RX counters, the MAC checksum-offload counters and
 *   the mailbox counters.  Several FreeBSD-style sysctl registrations
 *   are kept under "#if 0" pending porting.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue counters; txr/rxr advance in step with the index */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Note: rnode is re-targeted to the new per-queue node */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

#if 0 /* not yet */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2324
2325 /************************************************************************
2326 * ixv_set_sysctl_value
2327 ************************************************************************/
2328 static void
2329 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2330 const char *description, int *limit, int value)
2331 {
2332 device_t dev = adapter->dev;
2333 struct sysctllog **log;
2334 const struct sysctlnode *rnode, *cnode;
2335
2336 log = &adapter->sysctllog;
2337 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2338 aprint_error_dev(dev, "could not create sysctl root\n");
2339 return;
2340 }
2341 if (sysctl_createv(log, 0, &rnode, &cnode,
2342 CTLFLAG_READWRITE, CTLTYPE_INT,
2343 name, SYSCTL_DESCR(description),
2344 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2345 aprint_error_dev(dev, "could not create sysctl\n");
2346 *limit = value;
2347 } /* ixv_set_sysctl_value */
2348
2349 /************************************************************************
2350 * ixv_print_debug_info
2351 *
2352 * Called only when em_display_debug_stats is enabled.
2353 * Provides a way to take a look at important statistics
2354 * maintained by the driver and hardware.
2355 ************************************************************************/
2356 static void
2357 ixv_print_debug_info(struct adapter *adapter)
2358 {
2359 device_t dev = adapter->dev;
2360 struct ixgbe_hw *hw = &adapter->hw;
2361 struct ix_queue *que = adapter->queues;
2362 struct rx_ring *rxr;
2363 struct tx_ring *txr;
2364 #ifdef LRO
2365 struct lro_ctrl *lro;
2366 #endif /* LRO */
2367
2368 device_printf(dev, "Error Byte Count = %u \n",
2369 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2370
2371 for (int i = 0; i < adapter->num_queues; i++, que++) {
2372 txr = que->txr;
2373 rxr = que->rxr;
2374 #ifdef LRO
2375 lro = &rxr->lro;
2376 #endif /* LRO */
2377 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2378 que->msix, (long)que->irqs.ev_count);
2379 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2380 rxr->me, (long long)rxr->rx_packets.ev_count);
2381 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2382 rxr->me, (long)rxr->rx_bytes.ev_count);
2383 #ifdef LRO
2384 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2385 rxr->me, (long long)lro->lro_queued);
2386 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2387 rxr->me, (long long)lro->lro_flushed);
2388 #endif /* LRO */
2389 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2390 txr->me, (long)txr->total_packets.ev_count);
2391 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2392 txr->me, (long)txr->no_desc_avail.ev_count);
2393 }
2394
2395 device_printf(dev, "MBX IRQ Handled: %lu\n",
2396 (long)adapter->link_irq.ev_count);
2397 } /* ixv_print_debug_info */
2398
2399 /************************************************************************
2400 * ixv_sysctl_debug
2401 ************************************************************************/
2402 static int
2403 ixv_sysctl_debug(SYSCTLFN_ARGS)
2404 {
2405 struct sysctlnode node;
2406 struct adapter *adapter;
2407 int error, result;
2408
2409 node = *rnode;
2410 node.sysctl_data = &result;
2411 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2412
2413 if (error || newp == NULL)
2414 return error;
2415
2416 if (result == 1) {
2417 adapter = (struct adapter *)node.sysctl_data;
2418 ixv_print_debug_info(adapter);
2419 }
2420
2421 return 0;
2422 } /* ixv_sysctl_debug */
2423
2424 /************************************************************************
2425 * ixv_init_device_features
2426 ************************************************************************/
2427 static void
2428 ixv_init_device_features(struct adapter *adapter)
2429 {
2430 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2431 | IXGBE_FEATURE_VF
2432 | IXGBE_FEATURE_RSS
2433 | IXGBE_FEATURE_LEGACY_TX;
2434
2435 /* A tad short on feature flags for VFs, atm. */
2436 switch (adapter->hw.mac.type) {
2437 case ixgbe_mac_82599_vf:
2438 break;
2439 case ixgbe_mac_X540_vf:
2440 break;
2441 case ixgbe_mac_X550_vf:
2442 case ixgbe_mac_X550EM_x_vf:
2443 case ixgbe_mac_X550EM_a_vf:
2444 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2445 break;
2446 default:
2447 break;
2448 }
2449
2450 /* Enabled by default... */
2451 /* Is a virtual function (VF) */
2452 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2453 adapter->feat_en |= IXGBE_FEATURE_VF;
2454 /* Netmap */
2455 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2456 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2457 /* Receive-Side Scaling (RSS) */
2458 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2459 adapter->feat_en |= IXGBE_FEATURE_RSS;
2460 /* Needs advanced context descriptor regardless of offloads req'd */
2461 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2462 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2463
2464 /* Enabled via sysctl... */
2465 /* Legacy (single queue) transmit */
2466 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2467 ixv_enable_legacy_tx)
2468 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2469 } /* ixv_init_device_features */
2470
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 *   Stops the adapter under the core lock.  Compiled out on NetBSD:
 *   there is no direct equivalent of this FreeBSD device method, and
 *   the functionality ought to be registered through pmf(9) instead.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2486
2487 static int
2488 ixv_ifflags_cb(struct ethercom *ec)
2489 {
2490 struct ifnet *ifp = &ec->ec_if;
2491 struct adapter *adapter = ifp->if_softc;
2492 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2493
2494 IXGBE_CORE_LOCK(adapter);
2495
2496 if (change != 0)
2497 adapter->if_flags = ifp->if_flags;
2498
2499 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2500 rc = ENETRESET;
2501
2502 /* Set up VLAN support and filter */
2503 ixv_setup_vlan_support(adapter);
2504
2505 IXGBE_CORE_UNLOCK(adapter);
2506
2507 return rc;
2508 }
2509
2510
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* All four L4 Rx checksum capabilities toggle as a single unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug trace only, no side effects */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Common path: let ether_ioctl do the generic work first;
		 * ENETRESET means the hardware state must be refreshed. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			/* Not running: nothing to reprogram now */
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU change: full re-init required */
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2591
2592 /************************************************************************
2593 * ixv_init
2594 ************************************************************************/
2595 static int
2596 ixv_init(struct ifnet *ifp)
2597 {
2598 struct adapter *adapter = ifp->if_softc;
2599
2600 IXGBE_CORE_LOCK(adapter);
2601 ixv_init_locked(adapter);
2602 IXGBE_CORE_UNLOCK(adapter);
2603
2604 return 0;
2605 } /* ixv_init */
2606
2607
/************************************************************************
 * ixv_handle_que
 *
 *   Softint handler for a queue: drains RX, completes TX, restarts
 *   transmission if packets are pending, and either reschedules
 *   itself (more RX work remains) or re-enables the queue interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* more == true means ixgbe_rxeof hit its work limit */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		/* Multiqueue path: kick the per-queue pcq if non-empty */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* RX not drained: requeue ourselves instead of
			 * re-enabling the interrupt */
			adapter->req.ev_count++;
			softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2647
/************************************************************************
 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates num_queues + 1 MSI-X vectors: one per queue pair
 *   (TX/RX) plus a final one for the mailbox/link interrupt.  Each
 *   queue vector gets a hard interrupt handler, round-robin CPU
 *   affinity, and softints for deferred TX start and queue
 *   processing; the mailbox gets its own handler and softint.
 *
 *   Returns 0 on success, ENXIO on any allocation/establish failure.
 ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, msix_ctrl, rid, vector = 0;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[32];
	const char *intrstr = NULL;
	kcpuset_t *affinity;
	int cpu_id = 0;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* One vector per queue, plus one for the mailbox */
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			/* NOTE(review): handlers established on earlier
			 * iterations are not disestablished here before the
			 * vectors are released — verify teardown on this
			 * partial-failure path. */
			pci_intr_release(pc, adapter->osdep.intrs,
			    adapter->osdep.nintrs);
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			kcpuset_destroy(affinity);
			return (ENXIO);
		}
		que->msix = vector;
		adapter->active_queues |= (u64)(1 << que->msix);

		cpu_id = i;
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0)
			aprint_normal(", bound queue %d to cpu %d\n",
			    i, cpu_id % ncpu);
		else
			aprint_normal("\n");

#ifndef IXGBE_LEGACY_TX
		/* Softint for deferred multiqueue TX start */
		txr->txr_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);
#endif
		/* Softint that does the bulk of RX/TX completion work */
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixv_handle_que, que);
		if (que->que_si == NULL) {
			aprint_error_dev(dev,
			    "could not establish software interrupt\n");
		}
	}

	/* and Mailbox */
	cpu_id++;
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	adapter->vector = vector;
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the mbx handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		adapter->res = NULL;
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		kcpuset_destroy(affinity);
		return (ENXIO);
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	/* Tasklets for Mailbox */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixv_handle_link, adapter);
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	kcpuset_destroy(affinity);
	return (0);
} /* ixv_allocate_msix */
2783
2784 /************************************************************************
2785 * ixv_configure_interrupts - Setup MSI-X resources
2786 *
2787 * Note: The VF device MUST use MSI-X, there is no fallback.
2788 ************************************************************************/
2789 static int
2790 ixv_configure_interrupts(struct adapter *adapter)
2791 {
2792 device_t dev = adapter->dev;
2793 int want, queues, msgs;
2794
2795 /* Must have at least 2 MSI-X vectors */
2796 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
2797 if (msgs < 2) {
2798 aprint_error_dev(dev, "MSIX config error\n");
2799 return (ENXIO);
2800 }
2801 msgs = MIN(msgs, IXG_MAX_NINTR);
2802
2803 /* Figure out a reasonable auto config value */
2804 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
2805
2806 if (ixv_num_queues != 0)
2807 queues = ixv_num_queues;
2808 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
2809 queues = IXGBE_VF_MAX_TX_QUEUES;
2810
2811 /*
2812 * Want vectors for the queues,
2813 * plus an additional for mailbox.
2814 */
2815 want = queues + 1;
2816 if (msgs >= want)
2817 msgs = want;
2818 else {
2819 aprint_error_dev(dev,
2820 "MSI-X Configuration Problem, "
2821 "%d vectors but %d queues wanted!\n",
2822 msgs, want);
2823 return -1;
2824 }
2825
2826 adapter->msix_mem = (void *)1; /* XXX */
2827 aprint_normal_dev(dev,
2828 "Using MSI-X interrupts with %d vectors\n", msgs);
2829 adapter->num_queues = queues;
2830
2831 return (0);
2832 } /* ixv_configure_interrupts */
2833
2834
2835 /************************************************************************
2836 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2837 *
2838 * Done outside of interrupt context since the driver might sleep
2839 ************************************************************************/
2840 static void
2841 ixv_handle_link(void *context)
2842 {
2843 struct adapter *adapter = context;
2844
2845 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2846 &adapter->link_up, FALSE);
2847 ixv_update_link_status(adapter);
2848 } /* ixv_handle_link */
2849
2850 /************************************************************************
2851 * ixv_check_link - Used in the local timer to poll for link changes
2852 ************************************************************************/
2853 static void
2854 ixv_check_link(struct adapter *adapter)
2855 {
2856 adapter->hw.mac.get_link_status = TRUE;
2857
2858 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2859 &adapter->link_up, FALSE);
2860 ixv_update_link_status(adapter);
2861 } /* ixv_check_link */
2862