ixv.c revision 1.137 1 /*$NetBSD: ixv.c,v 1.137 2019/09/13 08:09:24 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #ifdef _KERNEL_OPT
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_net_mpsafe.h"
41 #endif
42
43 #include "ixgbe.h"
44 #include "vlan.h"
45
46 /************************************************************************
47 * Driver version
48 ************************************************************************/
49 static const char ixv_driver_version[] = "2.0.1-k";
50 /* XXX NetBSD: + 1.5.17 */
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
61 static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
62 {
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */
69 {0, 0, 0, 0, 0}
70 };
71
72 /************************************************************************
73 * Table of branding strings
74 ************************************************************************/
75 static const char *ixv_strings[] = {
76 "Intel(R) PRO/10GbE Virtual Function Network Driver"
77 };
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static s32 ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static int ixv_set_promisc(struct adapter *);
116 static void ixv_set_multi(struct adapter *);
117 static void ixv_update_link_status(struct adapter *);
118 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
119 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
120 static void ixv_configure_ivars(struct adapter *);
121 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
122 static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
123
124 static void ixv_setup_vlan_tagging(struct adapter *);
125 static int ixv_setup_vlan_support(struct adapter *);
126 static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
127 static int ixv_register_vlan(struct adapter *, u16);
128 static int ixv_unregister_vlan(struct adapter *, u16);
129
130 static void ixv_add_device_sysctls(struct adapter *);
131 static void ixv_save_stats(struct adapter *);
132 static void ixv_init_stats(struct adapter *);
133 static void ixv_update_stats(struct adapter *);
134 static void ixv_add_stats_sysctls(struct adapter *);
135 static void ixv_clear_evcnt(struct adapter *);
136
137 /* Sysctl handlers */
138 static void ixv_set_sysctl_value(struct adapter *, const char *,
139 const char *, int *, int);
140 static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
141 static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
142 static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
143 static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
144 static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
145 static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
146
147 /* The MSI-X Interrupt handlers */
148 static int ixv_msix_que(void *);
149 static int ixv_msix_mbx(void *);
150
151 /* Deferred interrupt tasklets */
152 static void ixv_handle_que(void *);
153 static void ixv_handle_link(void *);
154
155 /* Workqueue handler for deferred work */
156 static void ixv_handle_que_work(struct work *, void *);
157
158 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
159 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
160
161 /************************************************************************
162 * FreeBSD Device Interface Entry Points
163 ************************************************************************/
164 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
165 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
166 DVF_DETACH_SHUTDOWN);
167
168 #if 0
169 static driver_t ixv_driver = {
170 "ixv", ixv_methods, sizeof(struct adapter),
171 };
172
173 devclass_t ixv_devclass;
174 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
175 MODULE_DEPEND(ixv, pci, 1, 1, 1);
176 MODULE_DEPEND(ixv, ether, 1, 1, 1);
177 #endif
178
179 /*
180 * TUNEABLE PARAMETERS:
181 */
182
183 /* Number of Queues - do not exceed MSI-X vectors - 1 */
184 static int ixv_num_queues = 0;
185 #define TUNABLE_INT(__x, __y)
186 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
187
188 /*
189 * AIM: Adaptive Interrupt Moderation
190 * which means that the interrupt rate
191 * is varied over time based on the
192 * traffic for that interrupt vector
193 */
194 static bool ixv_enable_aim = false;
195 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
196
197 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
198 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
199
200 /* How many packets rxeof tries to clean at a time */
201 static int ixv_rx_process_limit = 256;
202 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
203
204 /* How many packets txeof tries to clean at a time */
205 static int ixv_tx_process_limit = 256;
206 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
207
208 /* Which packet processing uses workqueue or softint */
209 static bool ixv_txrx_workqueue = false;
210
211 /*
212 * Number of TX descriptors per ring,
213 * setting higher than RX as this seems
214 * the better performing choice.
215 */
216 static int ixv_txd = PERFORM_TXD;
217 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
218
219 /* Number of RX descriptors per ring */
220 static int ixv_rxd = PERFORM_RXD;
221 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
222
223 /* Legacy Transmit (single queue) */
224 static int ixv_enable_legacy_tx = 0;
225 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
226
227 #ifdef NET_MPSAFE
228 #define IXGBE_MPSAFE 1
229 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
230 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
231 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
232 #else
233 #define IXGBE_CALLOUT_FLAGS 0
234 #define IXGBE_SOFTINFT_FLAGS 0
235 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
236 #endif
237 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
238
239 #if 0
240 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
241 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
242 #endif
243
244 /************************************************************************
245 * ixv_probe - Device identification routine
246 *
247 * Determines if the driver should be loaded on
248 * adapter based on its PCI vendor/device ID.
249 *
250 * return BUS_PROBE_DEFAULT on success, positive on failure
251 ************************************************************************/
252 static int
253 ixv_probe(device_t dev, cfdata_t cf, void *aux)
254 {
255 #ifdef __HAVE_PCI_MSI_MSIX
256 const struct pci_attach_args *pa = aux;
257
258 return (ixv_lookup(pa) != NULL) ? 1 : 0;
259 #else
260 return 0;
261 #endif
262 } /* ixv_probe */
263
264 static const ixgbe_vendor_info_t *
265 ixv_lookup(const struct pci_attach_args *pa)
266 {
267 const ixgbe_vendor_info_t *ent;
268 pcireg_t subid;
269
270 INIT_DEBUGOUT("ixv_lookup: begin");
271
272 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
273 return NULL;
274
275 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
276
277 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
278 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
279 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
280 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
281 (ent->subvendor_id == 0)) &&
282 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
283 (ent->subdevice_id == 0))) {
284 return ent;
285 }
286 }
287
288 return NULL;
289 }
290
291 /************************************************************************
292 * ixv_attach - Device initialization routine
293 *
294 * Called when the driver is being loaded.
295 * Identifies the type of hardware, allocates all resources
296 * and initializes the hardware.
297 *
298 * return 0 on success, positive on failure
299 ************************************************************************/
300 static void
301 ixv_attach(device_t parent, device_t dev, void *aux)
302 {
303 struct adapter *adapter;
304 struct ixgbe_hw *hw;
305 int error = 0;
306 pcireg_t id, subid;
307 const ixgbe_vendor_info_t *ent;
308 const struct pci_attach_args *pa = aux;
309 const char *apivstr;
310 const char *str;
311 char buf[256];
312
313 INIT_DEBUGOUT("ixv_attach: begin");
314
315 /*
316 * Make sure BUSMASTER is set, on a VM under
317 * KVM it may not be and will break things.
318 */
319 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
320
321 /* Allocate, clear, and link in our adapter structure */
322 adapter = device_private(dev);
323 adapter->dev = dev;
324 adapter->hw.back = adapter;
325 hw = &adapter->hw;
326
327 adapter->init_locked = ixv_init_locked;
328 adapter->stop_locked = ixv_stop;
329
330 adapter->osdep.pc = pa->pa_pc;
331 adapter->osdep.tag = pa->pa_tag;
332 if (pci_dma64_available(pa))
333 adapter->osdep.dmat = pa->pa_dmat64;
334 else
335 adapter->osdep.dmat = pa->pa_dmat;
336 adapter->osdep.attached = false;
337
338 ent = ixv_lookup(pa);
339
340 KASSERT(ent != NULL);
341
342 aprint_normal(": %s, Version - %s\n",
343 ixv_strings[ent->index], ixv_driver_version);
344
345 /* Core Lock Init*/
346 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
347
348 /* Do base PCI setup - map BAR0 */
349 if (ixv_allocate_pci_resources(adapter, pa)) {
350 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
351 error = ENXIO;
352 goto err_out;
353 }
354
355 /* SYSCTL APIs */
356 ixv_add_device_sysctls(adapter);
357
358 /* Set up the timer callout */
359 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
360
361 /* Save off the information about this board */
362 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
363 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
364 hw->vendor_id = PCI_VENDOR(id);
365 hw->device_id = PCI_PRODUCT(id);
366 hw->revision_id =
367 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
368 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
369 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
370
371 /* A subset of set_mac_type */
372 switch (hw->device_id) {
373 case IXGBE_DEV_ID_82599_VF:
374 hw->mac.type = ixgbe_mac_82599_vf;
375 str = "82599 VF";
376 break;
377 case IXGBE_DEV_ID_X540_VF:
378 hw->mac.type = ixgbe_mac_X540_vf;
379 str = "X540 VF";
380 break;
381 case IXGBE_DEV_ID_X550_VF:
382 hw->mac.type = ixgbe_mac_X550_vf;
383 str = "X550 VF";
384 break;
385 case IXGBE_DEV_ID_X550EM_X_VF:
386 hw->mac.type = ixgbe_mac_X550EM_x_vf;
387 str = "X550EM X VF";
388 break;
389 case IXGBE_DEV_ID_X550EM_A_VF:
390 hw->mac.type = ixgbe_mac_X550EM_a_vf;
391 str = "X550EM A VF";
392 break;
393 default:
394 /* Shouldn't get here since probe succeeded */
395 aprint_error_dev(dev, "Unknown device ID!\n");
396 error = ENXIO;
397 goto err_out;
398 break;
399 }
400 aprint_normal_dev(dev, "device %s\n", str);
401
402 ixv_init_device_features(adapter);
403
404 /* Initialize the shared code */
405 error = ixgbe_init_ops_vf(hw);
406 if (error) {
407 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
408 error = EIO;
409 goto err_out;
410 }
411
412 /* Setup the mailbox */
413 ixgbe_init_mbx_params_vf(hw);
414
415 /* Set the right number of segments */
416 adapter->num_segs = IXGBE_82599_SCATTER;
417
418 /* Reset mbox api to 1.0 */
419 error = hw->mac.ops.reset_hw(hw);
420 if (error == IXGBE_ERR_RESET_FAILED)
421 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
422 else if (error)
423 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
424 error);
425 if (error) {
426 error = EIO;
427 goto err_out;
428 }
429
430 error = hw->mac.ops.init_hw(hw);
431 if (error) {
432 aprint_error_dev(dev, "...init_hw() failed!\n");
433 error = EIO;
434 goto err_out;
435 }
436
437 /* Negotiate mailbox API version */
438 error = ixv_negotiate_api(adapter);
439 if (error)
440 aprint_normal_dev(dev,
441 "MBX API negotiation failed during attach!\n");
442 switch (hw->api_version) {
443 case ixgbe_mbox_api_10:
444 apivstr = "1.0";
445 break;
446 case ixgbe_mbox_api_20:
447 apivstr = "2.0";
448 break;
449 case ixgbe_mbox_api_11:
450 apivstr = "1.1";
451 break;
452 case ixgbe_mbox_api_12:
453 apivstr = "1.2";
454 break;
455 case ixgbe_mbox_api_13:
456 apivstr = "1.3";
457 break;
458 default:
459 apivstr = "unknown";
460 break;
461 }
462 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
463
464 /* If no mac address was assigned, make a random one */
465 if (!ixv_check_ether_addr(hw->mac.addr)) {
466 u8 addr[ETHER_ADDR_LEN];
467 uint64_t rndval = cprng_strong64();
468
469 memcpy(addr, &rndval, sizeof(addr));
470 addr[0] &= 0xFE;
471 addr[0] |= 0x02;
472 bcopy(addr, hw->mac.addr, sizeof(addr));
473 }
474
475 /* Register for VLAN events */
476 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
477
478 /* Sysctls for limiting the amount of work done in the taskqueues */
479 ixv_set_sysctl_value(adapter, "rx_processing_limit",
480 "max number of rx packets to process",
481 &adapter->rx_process_limit, ixv_rx_process_limit);
482
483 ixv_set_sysctl_value(adapter, "tx_processing_limit",
484 "max number of tx packets to process",
485 &adapter->tx_process_limit, ixv_tx_process_limit);
486
487 /* Do descriptor calc and sanity checks */
488 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
489 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
490 aprint_error_dev(dev, "TXD config issue, using default!\n");
491 adapter->num_tx_desc = DEFAULT_TXD;
492 } else
493 adapter->num_tx_desc = ixv_txd;
494
495 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
496 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
497 aprint_error_dev(dev, "RXD config issue, using default!\n");
498 adapter->num_rx_desc = DEFAULT_RXD;
499 } else
500 adapter->num_rx_desc = ixv_rxd;
501
502 /* Setup MSI-X */
503 error = ixv_configure_interrupts(adapter);
504 if (error)
505 goto err_out;
506
507 /* Allocate our TX/RX Queues */
508 if (ixgbe_allocate_queues(adapter)) {
509 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
510 error = ENOMEM;
511 goto err_out;
512 }
513
514 /* hw.ix defaults init */
515 adapter->enable_aim = ixv_enable_aim;
516
517 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
518
519 error = ixv_allocate_msix(adapter, pa);
520 if (error) {
521 aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
522 goto err_late;
523 }
524
525 /* Setup OS specific network interface */
526 error = ixv_setup_interface(dev, adapter);
527 if (error != 0) {
528 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
529 goto err_late;
530 }
531
532 /* Do the stats setup */
533 ixv_save_stats(adapter);
534 ixv_init_stats(adapter);
535 ixv_add_stats_sysctls(adapter);
536
537 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
538 ixgbe_netmap_attach(adapter);
539
540 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
541 aprint_verbose_dev(dev, "feature cap %s\n", buf);
542 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
543 aprint_verbose_dev(dev, "feature ena %s\n", buf);
544
545 INIT_DEBUGOUT("ixv_attach: end");
546 adapter->osdep.attached = true;
547
548 return;
549
550 err_late:
551 ixgbe_free_transmit_structures(adapter);
552 ixgbe_free_receive_structures(adapter);
553 free(adapter->queues, M_DEVBUF);
554 err_out:
555 ixv_free_pci_resources(adapter);
556 IXGBE_CORE_LOCK_DESTROY(adapter);
557
558 return;
559 } /* ixv_attach */
560
561 /************************************************************************
562 * ixv_detach - Device removal routine
563 *
564 * Called when the driver is being removed.
565 * Stops the adapter and deallocates all the resources
566 * that were allocated for driver operation.
567 *
568 * return 0 on success, positive on failure
569 ************************************************************************/
570 static int
571 ixv_detach(device_t dev, int flags)
572 {
573 struct adapter *adapter = device_private(dev);
574 struct ixgbe_hw *hw = &adapter->hw;
575 struct ix_queue *que = adapter->queues;
576 struct tx_ring *txr = adapter->tx_rings;
577 struct rx_ring *rxr = adapter->rx_rings;
578 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
579
580 INIT_DEBUGOUT("ixv_detach: begin");
581 if (adapter->osdep.attached == false)
582 return 0;
583
584 /* Stop the interface. Callouts are stopped in it. */
585 ixv_ifstop(adapter->ifp, 1);
586
587 #if NVLAN > 0
588 /* Make sure VLANs are not using driver */
589 if (!VLAN_ATTACHED(&adapter->osdep.ec))
590 ; /* nothing to do: no VLANs */
591 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
592 vlan_ifdetach(adapter->ifp);
593 else {
594 aprint_error_dev(dev, "VLANs in use, detach first\n");
595 return EBUSY;
596 }
597 #endif
598
599 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
600 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
601 softint_disestablish(txr->txr_si);
602 softint_disestablish(que->que_si);
603 }
604 if (adapter->txr_wq != NULL)
605 workqueue_destroy(adapter->txr_wq);
606 if (adapter->txr_wq_enqueued != NULL)
607 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
608 if (adapter->que_wq != NULL)
609 workqueue_destroy(adapter->que_wq);
610
611 /* Drain the Mailbox(link) queue */
612 softint_disestablish(adapter->link_si);
613
614 ether_ifdetach(adapter->ifp);
615 callout_halt(&adapter->timer, NULL);
616
617 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
618 netmap_detach(adapter->ifp);
619
620 ixv_free_pci_resources(adapter);
621 #if 0 /* XXX the NetBSD port is probably missing something here */
622 bus_generic_detach(dev);
623 #endif
624 if_detach(adapter->ifp);
625 if_percpuq_destroy(adapter->ipq);
626
627 sysctl_teardown(&adapter->sysctllog);
628 evcnt_detach(&adapter->efbig_tx_dma_setup);
629 evcnt_detach(&adapter->mbuf_defrag_failed);
630 evcnt_detach(&adapter->efbig2_tx_dma_setup);
631 evcnt_detach(&adapter->einval_tx_dma_setup);
632 evcnt_detach(&adapter->other_tx_dma_setup);
633 evcnt_detach(&adapter->eagain_tx_dma_setup);
634 evcnt_detach(&adapter->enomem_tx_dma_setup);
635 evcnt_detach(&adapter->watchdog_events);
636 evcnt_detach(&adapter->tso_err);
637 evcnt_detach(&adapter->link_irq);
638
639 txr = adapter->tx_rings;
640 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
641 evcnt_detach(&adapter->queues[i].irqs);
642 evcnt_detach(&adapter->queues[i].handleq);
643 evcnt_detach(&adapter->queues[i].req);
644 evcnt_detach(&txr->no_desc_avail);
645 evcnt_detach(&txr->total_packets);
646 evcnt_detach(&txr->tso_tx);
647 #ifndef IXGBE_LEGACY_TX
648 evcnt_detach(&txr->pcq_drops);
649 #endif
650
651 evcnt_detach(&rxr->rx_packets);
652 evcnt_detach(&rxr->rx_bytes);
653 evcnt_detach(&rxr->rx_copies);
654 evcnt_detach(&rxr->no_jmbuf);
655 evcnt_detach(&rxr->rx_discarded);
656 }
657 evcnt_detach(&stats->ipcs);
658 evcnt_detach(&stats->l4cs);
659 evcnt_detach(&stats->ipcs_bad);
660 evcnt_detach(&stats->l4cs_bad);
661
662 /* Packet Reception Stats */
663 evcnt_detach(&stats->vfgorc);
664 evcnt_detach(&stats->vfgprc);
665 evcnt_detach(&stats->vfmprc);
666
667 /* Packet Transmission Stats */
668 evcnt_detach(&stats->vfgotc);
669 evcnt_detach(&stats->vfgptc);
670
671 /* Mailbox Stats */
672 evcnt_detach(&hw->mbx.stats.msgs_tx);
673 evcnt_detach(&hw->mbx.stats.msgs_rx);
674 evcnt_detach(&hw->mbx.stats.acks);
675 evcnt_detach(&hw->mbx.stats.reqs);
676 evcnt_detach(&hw->mbx.stats.rsts);
677
678 ixgbe_free_transmit_structures(adapter);
679 ixgbe_free_receive_structures(adapter);
680 for (int i = 0; i < adapter->num_queues; i++) {
681 struct ix_queue *lque = &adapter->queues[i];
682 mutex_destroy(&lque->dc_mtx);
683 }
684 free(adapter->queues, M_DEVBUF);
685
686 IXGBE_CORE_LOCK_DESTROY(adapter);
687
688 return (0);
689 } /* ixv_detach */
690
691 /************************************************************************
692 * ixv_init_locked - Init entry point
693 *
694 * Used in two ways: It is used by the stack as an init entry
695 * point in network interface structure. It is also used
696 * by the driver as a hw/sw initialization routine to get
697 * to a consistent state.
698 *
699 * return 0 on success, positive on failure
700 ************************************************************************/
701 static void
702 ixv_init_locked(struct adapter *adapter)
703 {
704 struct ifnet *ifp = adapter->ifp;
705 device_t dev = adapter->dev;
706 struct ixgbe_hw *hw = &adapter->hw;
707 struct ix_queue *que;
708 int error = 0;
709 uint32_t mask;
710 int i;
711
712 INIT_DEBUGOUT("ixv_init_locked: begin");
713 KASSERT(mutex_owned(&adapter->core_mtx));
714 hw->adapter_stopped = FALSE;
715 hw->mac.ops.stop_adapter(hw);
716 callout_stop(&adapter->timer);
717 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
718 que->disabled_count = 0;
719
720 /* reprogram the RAR[0] in case user changed it. */
721 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
722
723 /* Get the latest mac address, User can use a LAA */
724 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
725 IXGBE_ETH_LENGTH_OF_ADDRESS);
726 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
727
728 /* Prepare transmit descriptors and buffers */
729 if (ixgbe_setup_transmit_structures(adapter)) {
730 aprint_error_dev(dev, "Could not setup transmit structures\n");
731 ixv_stop(adapter);
732 return;
733 }
734
735 /* Reset VF and renegotiate mailbox API version */
736 hw->mac.ops.reset_hw(hw);
737 hw->mac.ops.start_hw(hw);
738 error = ixv_negotiate_api(adapter);
739 if (error)
740 device_printf(dev,
741 "Mailbox API negotiation failed in init_locked!\n");
742
743 ixv_initialize_transmit_units(adapter);
744
745 /* Setup Multicast table */
746 ixv_set_multi(adapter);
747
748 /*
749 * Determine the correct mbuf pool
750 * for doing jumbo/headersplit
751 */
752 if (ifp->if_mtu > ETHERMTU)
753 adapter->rx_mbuf_sz = MJUMPAGESIZE;
754 else
755 adapter->rx_mbuf_sz = MCLBYTES;
756
757 /* Prepare receive descriptors and buffers */
758 if (ixgbe_setup_receive_structures(adapter)) {
759 device_printf(dev, "Could not setup receive structures\n");
760 ixv_stop(adapter);
761 return;
762 }
763
764 /* Configure RX settings */
765 ixv_initialize_receive_units(adapter);
766
767 #if 0 /* XXX isn't it required? -- msaitoh */
768 /* Set the various hardware offload abilities */
769 ifp->if_hwassist = 0;
770 if (ifp->if_capenable & IFCAP_TSO4)
771 ifp->if_hwassist |= CSUM_TSO;
772 if (ifp->if_capenable & IFCAP_TXCSUM) {
773 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
774 #if __FreeBSD_version >= 800000
775 ifp->if_hwassist |= CSUM_SCTP;
776 #endif
777 }
778 #endif
779
780 /* Set up VLAN offload and filter */
781 ixv_setup_vlan_support(adapter);
782
783 /* Set up MSI-X routing */
784 ixv_configure_ivars(adapter);
785
786 /* Set up auto-mask */
787 mask = (1 << adapter->vector);
788 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
789 mask |= (1 << que->msix);
790 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
791
792 /* Set moderation on the Link interrupt */
793 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
794
795 /* Stats init */
796 ixv_init_stats(adapter);
797
798 /* Config/Enable Link */
799 hw->mac.get_link_status = TRUE;
800 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
801 FALSE);
802
803 /* Start watchdog */
804 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
805
806 /* And now turn on interrupts */
807 ixv_enable_intr(adapter);
808
809 /* Update saved flags. See ixgbe_ifflags_cb() */
810 adapter->if_flags = ifp->if_flags;
811 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
812
813 /* Now inform the stack we're ready */
814 ifp->if_flags |= IFF_RUNNING;
815 ifp->if_flags &= ~IFF_OACTIVE;
816
817 return;
818 } /* ixv_init_locked */
819
820 /************************************************************************
821 * ixv_enable_queue
822 ************************************************************************/
823 static inline void
824 ixv_enable_queue(struct adapter *adapter, u32 vector)
825 {
826 struct ixgbe_hw *hw = &adapter->hw;
827 struct ix_queue *que = &adapter->queues[vector];
828 u32 queue = 1UL << vector;
829 u32 mask;
830
831 mutex_enter(&que->dc_mtx);
832 if (que->disabled_count > 0 && --que->disabled_count > 0)
833 goto out;
834
835 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
836 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
837 out:
838 mutex_exit(&que->dc_mtx);
839 } /* ixv_enable_queue */
840
841 /************************************************************************
842 * ixv_disable_queue
843 ************************************************************************/
844 static inline void
845 ixv_disable_queue(struct adapter *adapter, u32 vector)
846 {
847 struct ixgbe_hw *hw = &adapter->hw;
848 struct ix_queue *que = &adapter->queues[vector];
849 u32 queue = 1UL << vector;
850 u32 mask;
851
852 mutex_enter(&que->dc_mtx);
853 if (que->disabled_count++ > 0)
854 goto out;
855
856 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
857 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
858 out:
859 mutex_exit(&que->dc_mtx);
860 } /* ixv_disable_queue */
861
#if 0
/* Currently unused: re-trigger interrupts for the given queue bitmap */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
870
871
872 /************************************************************************
873 * ixv_msix_que - MSI-X Queue Interrupt Service routine
874 ************************************************************************/
875 static int
876 ixv_msix_que(void *arg)
877 {
878 struct ix_queue *que = arg;
879 struct adapter *adapter = que->adapter;
880 struct tx_ring *txr = que->txr;
881 struct rx_ring *rxr = que->rxr;
882 bool more;
883 u32 newitr = 0;
884
885 ixv_disable_queue(adapter, que->msix);
886 ++que->irqs.ev_count;
887
888 #ifdef __NetBSD__
889 /* Don't run ixgbe_rxeof in interrupt context */
890 more = true;
891 #else
892 more = ixgbe_rxeof(que);
893 #endif
894
895 IXGBE_TX_LOCK(txr);
896 ixgbe_txeof(txr);
897 IXGBE_TX_UNLOCK(txr);
898
899 /* Do AIM now? */
900
901 if (adapter->enable_aim == false)
902 goto no_calc;
903 /*
904 * Do Adaptive Interrupt Moderation:
905 * - Write out last calculated setting
906 * - Calculate based on average size over
907 * the last interval.
908 */
909 if (que->eitr_setting)
910 ixv_eitr_write(adapter, que->msix, que->eitr_setting);
911
912 que->eitr_setting = 0;
913
914 /* Idle, do nothing */
915 if ((txr->bytes == 0) && (rxr->bytes == 0))
916 goto no_calc;
917
918 if ((txr->bytes) && (txr->packets))
919 newitr = txr->bytes/txr->packets;
920 if ((rxr->bytes) && (rxr->packets))
921 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
922 newitr += 24; /* account for hardware frame, crc */
923
924 /* set an upper boundary */
925 newitr = uimin(newitr, 3000);
926
927 /* Be nice to the mid range */
928 if ((newitr > 300) && (newitr < 1200))
929 newitr = (newitr / 3);
930 else
931 newitr = (newitr / 2);
932
933 /*
934 * When RSC is used, ITR interval must be larger than RSC_DELAY.
935 * Currently, we use 2us for RSC_DELAY. The minimum value is always
936 * greater than 2us on 100M (and 10M?(not documented)), but it's not
937 * on 1G and higher.
938 */
939 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
940 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
941 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
942 newitr = IXGBE_MIN_RSC_EITR_10G1G;
943 }
944
945 /* save for next interrupt */
946 que->eitr_setting = newitr;
947
948 /* Reset state */
949 txr->bytes = 0;
950 txr->packets = 0;
951 rxr->bytes = 0;
952 rxr->packets = 0;
953
954 no_calc:
955 if (more)
956 softint_schedule(que->que_si);
957 else /* Re-enable this interrupt */
958 ixv_enable_queue(adapter, que->msix);
959
960 return 1;
961 } /* ixv_msix_que */
962
963 /************************************************************************
964 * ixv_msix_mbx
965 ************************************************************************/
966 static int
967 ixv_msix_mbx(void *arg)
968 {
969 struct adapter *adapter = arg;
970 struct ixgbe_hw *hw = &adapter->hw;
971
972 ++adapter->link_irq.ev_count;
973 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
974
975 /* Link status change */
976 hw->mac.get_link_status = TRUE;
977 softint_schedule(adapter->link_si);
978
979 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
980
981 return 1;
982 } /* ixv_msix_mbx */
983
984 static void
985 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
986 {
987
988 /*
989 * Newer devices than 82598 have VF function, so this function is
990 * simple.
991 */
992 itr |= IXGBE_EITR_CNT_WDIS;
993
994 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
995 }
996
997
998 /************************************************************************
999 * ixv_media_status - Media Ioctl callback
1000 *
1001 * Called whenever the user queries the status of
1002 * the interface using ifconfig.
1003 ************************************************************************/
1004 static void
1005 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1006 {
1007 struct adapter *adapter = ifp->if_softc;
1008
1009 INIT_DEBUGOUT("ixv_media_status: begin");
1010 IXGBE_CORE_LOCK(adapter);
1011 ixv_update_link_status(adapter);
1012
1013 ifmr->ifm_status = IFM_AVALID;
1014 ifmr->ifm_active = IFM_ETHER;
1015
1016 if (adapter->link_active != LINK_STATE_UP) {
1017 ifmr->ifm_active |= IFM_NONE;
1018 IXGBE_CORE_UNLOCK(adapter);
1019 return;
1020 }
1021
1022 ifmr->ifm_status |= IFM_ACTIVE;
1023
1024 switch (adapter->link_speed) {
1025 case IXGBE_LINK_SPEED_10GB_FULL:
1026 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1027 break;
1028 case IXGBE_LINK_SPEED_5GB_FULL:
1029 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1030 break;
1031 case IXGBE_LINK_SPEED_2_5GB_FULL:
1032 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1033 break;
1034 case IXGBE_LINK_SPEED_1GB_FULL:
1035 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1036 break;
1037 case IXGBE_LINK_SPEED_100_FULL:
1038 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1039 break;
1040 case IXGBE_LINK_SPEED_10_FULL:
1041 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1042 break;
1043 }
1044
1045 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1046
1047 IXGBE_CORE_UNLOCK(adapter);
1048 } /* ixv_media_status */
1049
1050 /************************************************************************
1051 * ixv_media_change - Media Ioctl callback
1052 *
1053 * Called when the user changes speed/duplex using
 1054  *   Called when the user changes speed/duplex using
1055 ************************************************************************/
1056 static int
1057 ixv_media_change(struct ifnet *ifp)
1058 {
1059 struct adapter *adapter = ifp->if_softc;
1060 struct ifmedia *ifm = &adapter->media;
1061
1062 INIT_DEBUGOUT("ixv_media_change: begin");
1063
1064 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1065 return (EINVAL);
1066
1067 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1068 case IFM_AUTO:
1069 break;
1070 default:
1071 device_printf(adapter->dev, "Only auto media type\n");
1072 return (EINVAL);
1073 }
1074
1075 return (0);
1076 } /* ixv_media_change */
1077
1078 /************************************************************************
1079 * ixv_set_promisc
1080 ************************************************************************/
1081 static int
1082 ixv_set_promisc(struct adapter *adapter)
1083 {
1084 struct ifnet *ifp = adapter->ifp;
1085 struct ixgbe_hw *hw = &adapter->hw;
1086 struct ethercom *ec = &adapter->osdep.ec;
1087 int error = 0;
1088
1089 KASSERT(mutex_owned(&adapter->core_mtx));
1090 if (ifp->if_flags & IFF_PROMISC) {
1091 error = hw->mac.ops.update_xcast_mode(hw,
1092 IXGBEVF_XCAST_MODE_PROMISC);
1093 if (error == IXGBE_ERR_NOT_TRUSTED) {
1094 device_printf(adapter->dev,
1095 "this interface is not trusted\n");
1096 error = EPERM;
1097 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1098 device_printf(adapter->dev,
1099 "the PF doesn't support promisc mode\n");
1100 error = EOPNOTSUPP;
1101 } else if (error) {
1102 device_printf(adapter->dev,
1103 "failed to set promisc mode. error = %d\n",
1104 error);
1105 error = EIO;
1106 }
1107 } else if (ec->ec_flags & ETHER_F_ALLMULTI) {
1108 error = hw->mac.ops.update_xcast_mode(hw,
1109 IXGBEVF_XCAST_MODE_ALLMULTI);
1110 if (error == IXGBE_ERR_NOT_TRUSTED) {
1111 device_printf(adapter->dev,
1112 "this interface is not trusted\n");
1113 error = EPERM;
1114 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1115 device_printf(adapter->dev,
1116 "the PF doesn't support allmulti mode\n");
1117 error = EOPNOTSUPP;
1118 } else if (error) {
1119 device_printf(adapter->dev,
1120 "failed to set allmulti mode. error = %d\n",
1121 error);
1122 error = EIO;
1123 }
1124 } else {
1125 error = hw->mac.ops.update_xcast_mode(hw,
1126 IXGBEVF_XCAST_MODE_MULTI);
1127 if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1128 /* normal operation */
1129 error = 0;
1130 } else if (error) {
1131 device_printf(adapter->dev,
1132 "failed to chane filtering mode to normal. "
1133 "error = %d\n", error);
1134 error = EIO;
1135 }
1136 ec->ec_flags &= ~ETHER_F_ALLMULTI;
1137 }
1138
1139 return error;
1140 } /* ixv_set_promisc */
1141
1142 /************************************************************************
1143 * ixv_negotiate_api
1144 *
1145 * Negotiate the Mailbox API with the PF;
1146 * start with the most featured API first.
1147 ************************************************************************/
1148 static int
1149 ixv_negotiate_api(struct adapter *adapter)
1150 {
1151 struct ixgbe_hw *hw = &adapter->hw;
1152 int mbx_api[] = { ixgbe_mbox_api_13,
1153 ixgbe_mbox_api_12,
1154 ixgbe_mbox_api_11,
1155 ixgbe_mbox_api_10,
1156 ixgbe_mbox_api_unknown };
1157 int i = 0;
1158
1159 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1160 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1161 return (0);
1162 i++;
1163 }
1164
1165 return (EINVAL);
1166 } /* ixv_negotiate_api */
1167
1168
1169 /************************************************************************
1170 * ixv_set_multi - Multicast Update
1171 *
1172 * Called whenever multicast address list is updated.
1173 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	u8 mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;
	bool overflow = false;
	bool allmulti = false;
	int error;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/*
	 * Gather up to IXGBE_MAX_VF_MC multicast addresses into the flat
	 * mta[] array.  If the list is longer than the PF accepts for a
	 * VF, remember the overflow and try allmulti below instead.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (mcnt >= IXGBE_MAX_VF_MC) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (overflow) {
		/* Too many addresses: ask the PF for allmulti mode. */
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(adapter->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			allmulti = true;
			ec->ec_flags |= ETHER_F_ALLMULTI;
		}
	}

	if (!allmulti) {
		/*
		 * Filtered multicast.  This also runs when the allmulti
		 * request above failed; the (possibly truncated) mta[]
		 * is then still programmed below as a best effort.
		 */
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_MULTI);
		if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			/* normal operation */
			error = 0;
		} else if (error) {
			device_printf(adapter->dev,
			    "failed to set Ethernet multicast address "
			    "operation to normal. error = %d\n", error);
		}
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
	}

	update_ptr = mta;

	/* Hand the list to the shared code one address at a time. */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);
} /* ixv_set_multi */
1248
1249 /************************************************************************
1250 * ixv_mc_array_itr
1251 *
1252 * An iterator function needed by the multicast shared code.
1253 * It feeds the shared code routine the addresses in the
1254 * array of ixv_set_multi() one by one.
1255 ************************************************************************/
1256 static u8 *
1257 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1258 {
1259 u8 *addr = *update_ptr;
1260 u8 *newptr;
1261
1262 *vmdq = 0;
1263
1264 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1265 *update_ptr = newptr;
1266
1267 return addr;
1268 } /* ixv_mc_array_itr */
1269
1270 /************************************************************************
1271 * ixv_local_timer - Timer routine
1272 *
1273 * Checks for link status, updates statistics,
1274 * and runs the watchdog check.
1275 ************************************************************************/
/* Callout entry point: take the core lock and run the real timer body. */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1285
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending work */
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;		/* number of queues declared hung */
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* A link change requires a full reinit; skip everything else. */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Sum the per-TX-ring error counters into the adapter-wide
	 * event counters (one pass, eight accumulators).
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): the hung test reads que->busy but the
		 * mark below writes que->txr->busy -- presumably
		 * intentional (mirrors the PF driver), but confirm
		 * against ixgbe.c before touching.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:
	/* Every queue is hung: log, count, and reinitialize the device. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1384
1385 /************************************************************************
1386 * ixv_update_link_status - Update OS on link state
1387 *
1388 * Note: Only updates the OS on the cached link state.
1389 * The real check of the hardware only happens with
1390 * a link interrupt.
1391 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only notify the stack on an actual UP transition. */
		if (adapter->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				/* Human-readable speed for the boot log. */
				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */
1448
1449
1450 /************************************************************************
1451 * ixv_stop - Stop the hardware
1452 *
1453 * Disables all traffic on the adapter by issuing a
1454 * global reset on the MAC and deallocates TX/RX buffers.
1455 ************************************************************************/
/* if_stop entry point: take the core lock and stop the adapter. */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1465
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped first so stop_adapter() actually runs
	 * its full sequence instead of returning early.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1493
1494
1495 /************************************************************************
1496 * ixv_allocate_pci_resources
1497 ************************************************************************/
/*
 * Map BAR0 (the VF register window) and pick up the tunable queue
 * count.  Returns 0 on success or ENXIO if the BAR cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Device registers must not be prefetched; strip the
		 * prefetchable flag before mapping.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1547
1548 /************************************************************************
1549 * ixv_free_pci_resources
1550 ************************************************************************/
/*
 * Tear down everything ixv_allocate_pci_resources() and the MSI-X setup
 * established: queue interrupt handlers, the mailbox handler, the
 * interrupt vectors themselves, and the BAR0 mapping.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 *  Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size is nonzero only if BAR0 was successfully mapped. */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1587
1588 /************************************************************************
1589 * ixv_setup_interface
1590 *
1591 * Setup networking device structure and register an interface.
1592 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit unless legacy TX was requested. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Checksum/TSO capabilities are opt-in (off by default). */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    |  ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	/* A VF only ever offers autoselect. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
1682
1683
1684 /************************************************************************
1685 * ixv_initialize_transmit_units - Enable transmit unit.
1686 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1731
1732
1733 /************************************************************************
1734 * ixv_initialize_rss_mapping
1735 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, each selecting a
	 * queue, packed four 8-bit entries per VFRETA register.  Without
	 * the RSS feature, queues are assigned round-robin (j wraps at
	 * num_queues).
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush the register. */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash-type bits into the MRQC register layout. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1829
1830
1831 /************************************************************************
1832 * ixv_initialize_receive_units - Setup receive registers and features.
1833 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, psrtype;

	/* Pick the RX buffer size (in SRRCTL units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to 10ms) until the disable takes effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to 10ms) until the enable takes effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Finally program the RSS key and redirection table. */
	ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
1946
1947 /************************************************************************
1948 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1949 *
1950 * Retrieves the TDH value from the hardware
1951 ************************************************************************/
1952 static int
1953 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1954 {
1955 struct sysctlnode node = *rnode;
1956 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1957 uint32_t val;
1958
1959 if (!txr)
1960 return (0);
1961
1962 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1963 node.sysctl_data = &val;
1964 return sysctl_lookup(SYSCTLFN_CALL(&node));
1965 } /* ixv_sysctl_tdh_handler */
1966
1967 /************************************************************************
1968 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1969 *
1970 * Retrieves the TDT value from the hardware
1971 ************************************************************************/
1972 static int
1973 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1974 {
1975 struct sysctlnode node = *rnode;
1976 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1977 uint32_t val;
1978
1979 if (!txr)
1980 return (0);
1981
1982 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1983 node.sysctl_data = &val;
1984 return sysctl_lookup(SYSCTLFN_CALL(&node));
1985 } /* ixv_sysctl_tdt_handler */
1986
1987 /************************************************************************
1988 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1989 * handler function
1990 *
1991 * Retrieves the next_to_check value
1992 ************************************************************************/
1993 static int
1994 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1995 {
1996 struct sysctlnode node = *rnode;
1997 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1998 uint32_t val;
1999
2000 if (!rxr)
2001 return (0);
2002
2003 val = rxr->next_to_check;
2004 node.sysctl_data = &val;
2005 return sysctl_lookup(SYSCTLFN_CALL(&node));
2006 } /* ixv_sysctl_next_to_check_handler */
2007
2008 /************************************************************************
2009 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
2010 *
2011 * Retrieves the RDH value from the hardware
2012 ************************************************************************/
2013 static int
2014 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
2015 {
2016 struct sysctlnode node = *rnode;
2017 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2018 uint32_t val;
2019
2020 if (!rxr)
2021 return (0);
2022
2023 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
2024 node.sysctl_data = &val;
2025 return sysctl_lookup(SYSCTLFN_CALL(&node));
2026 } /* ixv_sysctl_rdh_handler */
2027
2028 /************************************************************************
2029 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
2030 *
2031 * Retrieves the RDT value from the hardware
2032 ************************************************************************/
2033 static int
2034 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
2035 {
2036 struct sysctlnode node = *rnode;
2037 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2038 uint32_t val;
2039
2040 if (!rxr)
2041 return (0);
2042
2043 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2044 node.sysctl_data = &val;
2045 return sysctl_lookup(SYSCTLFN_CALL(&node));
2046 } /* ixv_sysctl_rdt_handler */
2047
2048 static void
2049 ixv_setup_vlan_tagging(struct adapter *adapter)
2050 {
2051 struct ethercom *ec = &adapter->osdep.ec;
2052 struct ixgbe_hw *hw = &adapter->hw;
2053 struct rx_ring *rxr;
2054 u32 ctrl;
2055 int i;
2056 bool hwtagging;
2057
2058 /* Enable HW tagging only if any vlan is attached */
2059 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2060 && VLAN_ATTACHED(ec);
2061
2062 /* Enable the queues */
2063 for (i = 0; i < adapter->num_queues; i++) {
2064 rxr = &adapter->rx_rings[i];
2065 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2066 if (hwtagging)
2067 ctrl |= IXGBE_RXDCTL_VME;
2068 else
2069 ctrl &= ~IXGBE_RXDCTL_VME;
2070 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2071 /*
2072 * Let Rx path know that it needs to store VLAN tag
2073 * as part of extra mbuf info.
2074 */
2075 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2076 }
2077 } /* ixv_setup_vlan_tagging */
2078
2079 /************************************************************************
2080 * ixv_setup_vlan_support
2081 ************************************************************************/
static int
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vid, vfta, retry;
	struct vlanid_list *vlanidp;
	int rv, error = 0;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixv_setup_vlan_tagging(adapter);

	/* No VLANs configured: tagging is set up, nothing to filter. */
	if (!VLAN_ATTACHED(ec))
		return 0;

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids (one bit per VLAN ID) */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		/* 32 VLAN IDs per shadow_vfta word. */
		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & ((u32)1 << j)) == 0)
				continue;
			vid = (i * 32) + j;

			/*
			 * Call the shared code mailbox routine.
			 * Retry a few times: the PF mailbox may be
			 * transiently busy.
			 */
			while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
			    FALSE)) != 0) {
				if (++retry > 5) {
					device_printf(adapter->dev,
					    "%s: max retry exceeded\n",
					    __func__);
					break;
				}
			}
			/* Record failure but keep trying remaining IDs. */
			if (rv != 0) {
				device_printf(adapter->dev,
				    "failed to set vlan %d\n", vid);
				error = EACCES;
			}
		}
	}
	return error;
} /* ixv_setup_vlan_support */
2161
2162 static int
2163 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2164 {
2165 struct ifnet *ifp = &ec->ec_if;
2166 struct adapter *adapter = ifp->if_softc;
2167 int rv;
2168
2169 if (set)
2170 rv = ixv_register_vlan(adapter, vid);
2171 else
2172 rv = ixv_unregister_vlan(adapter, vid);
2173
2174 if (rv != 0)
2175 return rv;
2176
2177 /*
2178 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2179 * or 0 to 1.
2180 */
2181 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2182 ixv_setup_vlan_tagging(adapter);
2183
2184 return rv;
2185 }
2186
2187 /************************************************************************
2188 * ixv_register_vlan
2189 *
2190 * Run via a vlan config EVENT, it enables us to use the
2191 * HW Filter table since we can get the vlan id. This just
2192 * creates the entry in the soft version of the VFTA, init
2193 * will repopulate the real table.
2194 ************************************************************************/
2195 static int
2196 ixv_register_vlan(struct adapter *adapter, u16 vtag)
2197 {
2198 struct ixgbe_hw *hw = &adapter->hw;
2199 u16 index, bit;
2200 int error;
2201
2202 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2203 return EINVAL;
2204 IXGBE_CORE_LOCK(adapter);
2205 index = (vtag >> 5) & 0x7F;
2206 bit = vtag & 0x1F;
2207 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2208 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2209 IXGBE_CORE_UNLOCK(adapter);
2210
2211 if (error != 0) {
2212 device_printf(adapter->dev, "failed to register vlan %hu\n",
2213 vtag);
2214 error = EACCES;
2215 }
2216 return error;
2217 } /* ixv_register_vlan */
2218
2219 /************************************************************************
2220 * ixv_unregister_vlan
2221 *
2222 * Run via a vlan unconfig EVENT, remove our entry
2223 * in the soft vfta.
2224 ************************************************************************/
2225 static int
2226 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2227 {
2228 struct ixgbe_hw *hw = &adapter->hw;
2229 u16 index, bit;
2230 int error;
2231
2232 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2233 return EINVAL;
2234
2235 IXGBE_CORE_LOCK(adapter);
2236 index = (vtag >> 5) & 0x7F;
2237 bit = vtag & 0x1F;
2238 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2239 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2240 IXGBE_CORE_UNLOCK(adapter);
2241
2242 if (error != 0) {
2243 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2244 vtag);
2245 error = EIO;
2246 }
2247 return error;
2248 } /* ixv_unregister_vlan */
2249
2250 /************************************************************************
2251 * ixv_enable_intr
2252 ************************************************************************/
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask;
	int i;

	/* For VTEIAC: one bit per MSI-X vector (mailbox + each queue) */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	/* For VTEIMS: unmask the mailbox vector, then each queue vector */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_enable_intr */
2275
2276 /************************************************************************
2277 * ixv_disable_intr
2278 ************************************************************************/
2279 static void
2280 ixv_disable_intr(struct adapter *adapter)
2281 {
2282 struct ix_queue *que = adapter->queues;
2283
2284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2285
2286 /* disable interrupts other than queues */
2287 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2288
2289 for (int i = 0; i < adapter->num_queues; i++, que++)
2290 ixv_disable_queue(adapter, que->msix);
2291
2292 IXGBE_WRITE_FLUSH(&adapter->hw);
2293 } /* ixv_disable_intr */
2294
2295 /************************************************************************
2296 * ixv_set_ivar
2297 *
2298 * Setup the correct IVAR register for a particular MSI-X interrupt
2299 * - entry is the register array entry
2300 * - vector is the MSI-X vector for this queue
2301 * - type is RX/TX/MISC
2302 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the IVAR entry as valid. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR (mailbox) lives in its own register */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
		/*
		 * Each VTIVAR register covers two queue entries; the byte
		 * offset within it is 16 * (odd/even entry) + 8 * type.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xffUL << index);
		ivar |= ((u32)vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
2324
2325 /************************************************************************
2326 * ixv_configure_ivars
2327 ************************************************************************/
2328 static void
2329 ixv_configure_ivars(struct adapter *adapter)
2330 {
2331 struct ix_queue *que = adapter->queues;
2332
2333 /* XXX We should sync EITR value calculation with ixgbe.c? */
2334
2335 for (int i = 0; i < adapter->num_queues; i++, que++) {
2336 /* First the RX queue entry */
2337 ixv_set_ivar(adapter, i, que->msix, 0);
2338 /* ... and the TX */
2339 ixv_set_ivar(adapter, i, que->msix, 1);
2340 /* Set an initial value in EITR */
2341 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2342 }
2343
2344 /* For the mailbox interrupt */
2345 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2346 } /* ixv_configure_ivars */
2347
2348
2349 /************************************************************************
2350 * ixv_save_stats
2351 *
2352 * The VF stats registers never have a truly virgin
2353 * starting point, so this routine tries to make an
2354 * artificial one, marking ground zero on attach as
2355 * it were.
2356 ************************************************************************/
static void
ixv_save_stats(struct adapter *adapter)
{
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/*
	 * Only accumulate when some traffic has been counted; fold the
	 * deltas (current - base) into the saved_reset_* totals so they
	 * survive a device reset.
	 */
	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
		stats->saved_reset_vfgprc +=
		    stats->vfgprc.ev_count - stats->base_vfgprc;
		stats->saved_reset_vfgptc +=
		    stats->vfgptc.ev_count - stats->base_vfgptc;
		stats->saved_reset_vfgorc +=
		    stats->vfgorc.ev_count - stats->base_vfgorc;
		stats->saved_reset_vfgotc +=
		    stats->vfgotc.ev_count - stats->base_vfgotc;
		stats->saved_reset_vfmprc +=
		    stats->vfmprc.ev_count - stats->base_vfmprc;
	}
} /* ixv_save_stats */
2375
2376 /************************************************************************
2377 * ixv_init_stats
2378 ************************************************************************/
2379 static void
2380 ixv_init_stats(struct adapter *adapter)
2381 {
2382 struct ixgbe_hw *hw = &adapter->hw;
2383
2384 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2385 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2386 adapter->stats.vf.last_vfgorc |=
2387 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2388
2389 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2390 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2391 adapter->stats.vf.last_vfgotc |=
2392 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2393
2394 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2395
2396 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2397 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2398 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2399 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2400 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2401 } /* ixv_init_stats */
2402
/*
 * Fold a 32-bit wrapping hardware counter into a 64-bit event counter:
 * a decrease relative to the last observed value means the register
 * wrapped, so carry into bit 32, then splice the new low 32 bits in.
 * Wrapped in do { } while (0) so it expands safely in if/else bodies;
 * all arguments are parenthesized against operator-precedence surprises.
 */
#define UPDATE_STAT_32(reg, last, count)			\
do {								\
	u32 current = IXGBE_READ_REG(hw, (reg));		\
	if (current < (last))					\
		(count).ev_count += 0x100000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFFF00000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)
2412
/*
 * Same as UPDATE_STAT_32 but for 36-bit counters split across an
 * LSB/MSB register pair; the wrap carry goes into bit 36.
 * do { } while (0) wrapper and parenthesized arguments for macro hygiene.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)			\
do {								\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));		\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));		\
	u64 current = ((cur_msb << 32) | cur_lsb);		\
	if (current < (last))					\
		(count).ev_count += 0x1000000000LL;		\
	(last) = current;					\
	(count).ev_count &= 0xFFFFFFF000000000LL;		\
	(count).ev_count |= current;				\
} while (/*CONSTCOND*/0)
2424
2425 /************************************************************************
2426 * ixv_update_stats - Update the board statistics counters.
2427 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Fold current register values into the 64-bit event counters. */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* Octet counters are 36 bits split over LSB/MSB registers. */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2449
2450 /************************************************************************
2451 * ixv_sysctl_interrupt_rate_handler
2452 ************************************************************************/
2453 static int
2454 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2455 {
2456 struct sysctlnode node = *rnode;
2457 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2458 struct adapter *adapter = que->adapter;
2459 uint32_t reg, usec, rate;
2460 int error;
2461
2462 if (que == NULL)
2463 return 0;
2464 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2465 usec = ((reg & 0x0FF8) >> 3);
2466 if (usec > 0)
2467 rate = 500000 / usec;
2468 else
2469 rate = 0;
2470 node.sysctl_data = &rate;
2471 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2472 if (error || newp == NULL)
2473 return error;
2474 reg &= ~0xfff; /* default, no limitation */
2475 if (rate > 0 && rate < 500000) {
2476 if (rate < 1000)
2477 rate = 1000;
2478 reg |= ((4000000/rate) & 0xff8);
2479 /*
2480 * When RSC is used, ITR interval must be larger than
2481 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2482 * The minimum value is always greater than 2us on 100M
2483 * (and 10M?(not documented)), but it's not on 1G and higher.
2484 */
2485 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2486 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2487 if ((adapter->num_queues > 1)
2488 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2489 return EINVAL;
2490 }
2491 ixv_max_interrupt_rate = rate;
2492 } else
2493 ixv_max_interrupt_rate = 0;
2494 ixv_eitr_write(adapter, que->msix, reg);
2495
2496 return (0);
2497 } /* ixv_sysctl_interrupt_rate_handler */
2498
2499 const struct sysctlnode *
2500 ixv_sysctl_instance(struct adapter *adapter)
2501 {
2502 const char *dvname;
2503 struct sysctllog **log;
2504 int rc;
2505 const struct sysctlnode *rnode;
2506
2507 log = &adapter->sysctllog;
2508 dvname = device_xname(adapter->dev);
2509
2510 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2511 0, CTLTYPE_NODE, dvname,
2512 SYSCTL_DESCR("ixv information and settings"),
2513 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2514 goto err;
2515
2516 return rnode;
2517 err:
2518 device_printf(adapter->dev,
2519 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2520 return NULL;
2521 }
2522
/* Register the device-wide (non-statistics) sysctl knobs. */
static void
ixv_add_device_sysctls(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	/* All knobs hang off the per-device hw.<ixvN> node. */
	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* "debug": writing 1 dumps driver state (ixv_sysctl_debug). */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* "enable_aim": toggles adapter->enable_aim directly. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* "txrx_workqueue": toggles adapter->txrx_use_workqueue directly. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
2556
2557 /************************************************************************
2558 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2559 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue sysctl nodes and event counters. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* hw.<ixvN>.q<i> node for this queue's knobs. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2741
/*
 * Reset every event counter attached by ixv_add_stats_sysctls() (plus
 * the per-ring soft counters) back to zero; used by ifconfig -z
 * (SIOCZIFDATA) handling.
 */
static void
ixv_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* Driver Statistics */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->link_irq.ev_count = 0;

	/* Per-queue counters (event counters and soft tallies). */
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->tso_tx.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}

	/* MAC stats get their own sub node */

	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;

	/* Packet Reception Stats */
	stats->vfgprc.ev_count = 0;
	stats->vfgorc.ev_count = 0;
	stats->vfmprc.ev_count = 0;
	stats->vfgptc.ev_count = 0;
	stats->vfgotc.ev_count = 0;

	/* Mailbox Stats */
	hw->mbx.stats.msgs_tx.ev_count = 0;
	hw->mbx.stats.msgs_rx.ev_count = 0;
	hw->mbx.stats.acks.ev_count = 0;
	hw->mbx.stats.reqs.ev_count = 0;
	hw->mbx.stats.rsts.ev_count = 0;

} /* ixv_clear_evcnt */
2811
2812 /************************************************************************
2813 * ixv_set_sysctl_value
2814 ************************************************************************/
2815 static void
2816 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2817 const char *description, int *limit, int value)
2818 {
2819 device_t dev = adapter->dev;
2820 struct sysctllog **log;
2821 const struct sysctlnode *rnode, *cnode;
2822
2823 log = &adapter->sysctllog;
2824 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2825 aprint_error_dev(dev, "could not create sysctl root\n");
2826 return;
2827 }
2828 if (sysctl_createv(log, 0, &rnode, &cnode,
2829 CTLFLAG_READWRITE, CTLTYPE_INT,
2830 name, SYSCTL_DESCR(description),
2831 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2832 aprint_error_dev(dev, "could not create sysctl\n");
2833 *limit = value;
2834 } /* ixv_set_sysctl_value */
2835
2836 /************************************************************************
2837 * ixv_print_debug_info
2838 *
2839 * Called only when em_display_debug_stats is enabled.
2840 * Provides a way to take a look at important statistics
2841 * maintained by the driver and hardware.
2842 ************************************************************************/
2843 static void
2844 ixv_print_debug_info(struct adapter *adapter)
2845 {
2846 device_t dev = adapter->dev;
2847 struct ix_queue *que = adapter->queues;
2848 struct rx_ring *rxr;
2849 struct tx_ring *txr;
2850 #ifdef LRO
2851 struct lro_ctrl *lro;
2852 #endif /* LRO */
2853
2854 for (int i = 0; i < adapter->num_queues; i++, que++) {
2855 txr = que->txr;
2856 rxr = que->rxr;
2857 #ifdef LRO
2858 lro = &rxr->lro;
2859 #endif /* LRO */
2860 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2861 que->msix, (long)que->irqs.ev_count);
2862 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2863 rxr->me, (long long)rxr->rx_packets.ev_count);
2864 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2865 rxr->me, (long)rxr->rx_bytes.ev_count);
2866 #ifdef LRO
2867 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2868 rxr->me, (uintmax_t)lro->lro_queued);
2869 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2870 rxr->me, (uintmax_t)lro->lro_flushed);
2871 #endif /* LRO */
2872 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2873 txr->me, (long)txr->total_packets.ev_count);
2874 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2875 txr->me, (long)txr->no_desc_avail.ev_count);
2876 }
2877
2878 device_printf(dev, "MBX IRQ Handled: %lu\n",
2879 (long)adapter->link_irq.ev_count);
2880 } /* ixv_print_debug_info */
2881
2882 /************************************************************************
2883 * ixv_sysctl_debug
2884 ************************************************************************/
2885 static int
2886 ixv_sysctl_debug(SYSCTLFN_ARGS)
2887 {
2888 struct sysctlnode node = *rnode;
2889 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2890 int error, result;
2891
2892 node.sysctl_data = &result;
2893 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2894
2895 if (error || newp == NULL)
2896 return error;
2897
2898 if (result == 1)
2899 ixv_print_debug_info(adapter);
2900
2901 return 0;
2902 } /* ixv_sysctl_debug */
2903
2904 /************************************************************************
2905 * ixv_init_device_features
2906 ************************************************************************/
2907 static void
2908 ixv_init_device_features(struct adapter *adapter)
2909 {
2910 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2911 | IXGBE_FEATURE_VF
2912 | IXGBE_FEATURE_RSS
2913 | IXGBE_FEATURE_LEGACY_TX;
2914
2915 /* A tad short on feature flags for VFs, atm. */
2916 switch (adapter->hw.mac.type) {
2917 case ixgbe_mac_82599_vf:
2918 break;
2919 case ixgbe_mac_X540_vf:
2920 break;
2921 case ixgbe_mac_X550_vf:
2922 case ixgbe_mac_X550EM_x_vf:
2923 case ixgbe_mac_X550EM_a_vf:
2924 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2925 break;
2926 default:
2927 break;
2928 }
2929
2930 /* Enabled by default... */
2931 /* Is a virtual function (VF) */
2932 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2933 adapter->feat_en |= IXGBE_FEATURE_VF;
2934 /* Netmap */
2935 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2936 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2937 /* Receive-Side Scaling (RSS) */
2938 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2939 adapter->feat_en |= IXGBE_FEATURE_RSS;
2940 /* Needs advanced context descriptor regardless of offloads req'd */
2941 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2942 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2943
2944 /* Enabled via sysctl... */
2945 /* Legacy (single queue) transmit */
2946 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2947 ixv_enable_legacy_tx)
2948 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2949 } /* ixv_init_device_features */
2950
2951 /************************************************************************
2952 * ixv_shutdown - Shutdown entry point
2953 ************************************************************************/
2954 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2955 static int
2956 ixv_shutdown(device_t dev)
2957 {
2958 struct adapter *adapter = device_private(dev);
2959 IXGBE_CORE_LOCK(adapter);
2960 ixv_stop(adapter);
2961 IXGBE_CORE_UNLOCK(adapter);
2962
2963 return (0);
2964 } /* ixv_shutdown */
2965 #endif
2966
2967 static int
2968 ixv_ifflags_cb(struct ethercom *ec)
2969 {
2970 struct ifnet *ifp = &ec->ec_if;
2971 struct adapter *adapter = ifp->if_softc;
2972 u_short saved_flags;
2973 u_short change;
2974 int rv = 0;
2975
2976 IXGBE_CORE_LOCK(adapter);
2977
2978 saved_flags = adapter->if_flags;
2979 change = ifp->if_flags ^ adapter->if_flags;
2980 if (change != 0)
2981 adapter->if_flags = ifp->if_flags;
2982
2983 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2984 rv = ENETRESET;
2985 goto out;
2986 } else if ((change & IFF_PROMISC) != 0) {
2987 rv = ixv_set_promisc(adapter);
2988 if (rv != 0) {
2989 /* Restore previous */
2990 adapter->if_flags = saved_flags;
2991 goto out;
2992 }
2993 }
2994
2995 /* Check for ec_capenable. */
2996 change = ec->ec_capenable ^ adapter->ec_capenable;
2997 adapter->ec_capenable = ec->ec_capenable;
2998 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
2999 | ETHERCAP_VLAN_HWFILTER)) != 0) {
3000 rv = ENETRESET;
3001 goto out;
3002 }
3003
3004 /*
3005 * Special handling is not required for ETHERCAP_VLAN_MTU.
3006 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
3007 */
3008
3009 /* Set up VLAN support and filter */
3010 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
3011 rv = ixv_setup_vlan_support(adapter);
3012
3013 out:
3014 IXGBE_CORE_UNLOCK(adapter);
3015
3016 return rv;
3017 }
3018
3019
3020 /************************************************************************
3021 * ixv_ioctl - Ioctl entry point
3022 *
3023 * Called when the user wants to configure the interface.
3024 *
3025 * return 0 on success, positive on failure
3026 ************************************************************************/
3027 static int
3028 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
3029 {
3030 struct adapter *adapter = ifp->if_softc;
3031 struct ixgbe_hw *hw = &adapter->hw;
3032 struct ifcapreq *ifcr = data;
3033 int error;
3034 int l4csum_en;
3035 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3036 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3037
3038 switch (command) {
3039 case SIOCSIFFLAGS:
3040 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
3041 break;
3042 case SIOCADDMULTI: {
3043 struct ether_multi *enm;
3044 struct ether_multistep step;
3045 struct ethercom *ec = &adapter->osdep.ec;
3046 bool overflow = false;
3047 int mcnt = 0;
3048
3049 /*
3050 * Check the number of multicast address. If it exceeds,
3051 * return ENOSPC.
3052 * Update this code when we support API 1.3.
3053 */
3054 ETHER_LOCK(ec);
3055 ETHER_FIRST_MULTI(step, ec, enm);
3056 while (enm != NULL) {
3057 mcnt++;
3058
3059 /*
3060 * This code is before adding, so one room is required
3061 * at least.
3062 */
3063 if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3064 overflow = true;
3065 break;
3066 }
3067 ETHER_NEXT_MULTI(step, enm);
3068 }
3069 ETHER_UNLOCK(ec);
3070 error = 0;
3071 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3072 error = hw->mac.ops.update_xcast_mode(hw,
3073 IXGBEVF_XCAST_MODE_ALLMULTI);
3074 if (error == IXGBE_ERR_NOT_TRUSTED) {
3075 device_printf(adapter->dev,
3076 "this interface is not trusted\n");
3077 error = EPERM;
3078 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3079 device_printf(adapter->dev,
3080 "the PF doesn't support allmulti mode\n");
3081 error = EOPNOTSUPP;
3082 } else if (error) {
3083 device_printf(adapter->dev,
3084 "number of Ethernet multicast addresses "
3085 "exceeds the limit (%d). error = %d\n",
3086 IXGBE_MAX_VF_MC, error);
3087 error = ENOSPC;
3088 } else
3089 ec->ec_flags |= ETHER_F_ALLMULTI;
3090 }
3091 if (error)
3092 return error;
3093 }
3094 /*FALLTHROUGH*/
3095 case SIOCDELMULTI:
3096 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3097 break;
3098 case SIOCSIFMEDIA:
3099 case SIOCGIFMEDIA:
3100 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3101 break;
3102 case SIOCSIFCAP:
3103 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3104 break;
3105 case SIOCSIFMTU:
3106 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3107 break;
3108 case SIOCZIFDATA:
3109 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3110 ixv_update_stats(adapter);
3111 ixv_clear_evcnt(adapter);
3112 break;
3113 default:
3114 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3115 break;
3116 }
3117
3118 switch (command) {
3119 case SIOCSIFCAP:
3120 /* Layer-4 Rx checksum offload has to be turned on and
3121 * off as a unit.
3122 */
3123 l4csum_en = ifcr->ifcr_capenable & l4csum;
3124 if (l4csum_en != l4csum && l4csum_en != 0)
3125 return EINVAL;
3126 /*FALLTHROUGH*/
3127 case SIOCADDMULTI:
3128 case SIOCDELMULTI:
3129 case SIOCSIFFLAGS:
3130 case SIOCSIFMTU:
3131 default:
3132 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3133 return error;
3134 if ((ifp->if_flags & IFF_RUNNING) == 0)
3135 ;
3136 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3137 IXGBE_CORE_LOCK(adapter);
3138 ixv_init_locked(adapter);
3139 IXGBE_CORE_UNLOCK(adapter);
3140 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3141 /*
3142 * Multicast list has changed; set the hardware filter
3143 * accordingly.
3144 */
3145 IXGBE_CORE_LOCK(adapter);
3146 ixv_disable_intr(adapter);
3147 ixv_set_multi(adapter);
3148 ixv_enable_intr(adapter);
3149 IXGBE_CORE_UNLOCK(adapter);
3150 }
3151 return 0;
3152 }
3153 } /* ixv_ioctl */
3154
3155 /************************************************************************
3156 * ixv_init
3157 ************************************************************************/
3158 static int
3159 ixv_init(struct ifnet *ifp)
3160 {
3161 struct adapter *adapter = ifp->if_softc;
3162
3163 IXGBE_CORE_LOCK(adapter);
3164 ixv_init_locked(adapter);
3165 IXGBE_CORE_UNLOCK(adapter);
3166
3167 return 0;
3168 } /* ixv_init */
3169
3170 /************************************************************************
3171 * ixv_handle_que
3172 ************************************************************************/
3173 static void
3174 ixv_handle_que(void *context)
3175 {
3176 struct ix_queue *que = context;
3177 struct adapter *adapter = que->adapter;
3178 struct tx_ring *txr = que->txr;
3179 struct ifnet *ifp = adapter->ifp;
3180 bool more;
3181
3182 que->handleq.ev_count++;
3183
3184 if (ifp->if_flags & IFF_RUNNING) {
3185 more = ixgbe_rxeof(que);
3186 IXGBE_TX_LOCK(txr);
3187 more |= ixgbe_txeof(txr);
3188 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
3189 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3190 ixgbe_mq_start_locked(ifp, txr);
3191 /* Only for queue 0 */
3192 /* NetBSD still needs this for CBQ */
3193 if ((&adapter->queues[0] == que)
3194 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3195 ixgbe_legacy_start_locked(ifp, txr);
3196 IXGBE_TX_UNLOCK(txr);
3197 if (more) {
3198 que->req.ev_count++;
3199 if (adapter->txrx_use_workqueue) {
3200 /*
3201 * "enqueued flag" is not required here
3202 * the same as ixg(4). See ixgbe_msix_que().
3203 */
3204 workqueue_enqueue(adapter->que_wq,
3205 &que->wq_cookie, curcpu());
3206 } else
3207 softint_schedule(que->que_si);
3208 return;
3209 }
3210 }
3211
3212 /* Re-enable this interrupt */
3213 ixv_enable_queue(adapter, que->msix);
3214
3215 return;
3216 } /* ixv_handle_que */
3217
3218 /************************************************************************
3219 * ixv_handle_que_work
3220 ************************************************************************/
3221 static void
3222 ixv_handle_que_work(struct work *wk, void *context)
3223 {
3224 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3225
3226 /*
3227 * "enqueued flag" is not required here the same as ixg(4).
3228 * See ixgbe_msix_que().
3229 */
3230 ixv_handle_que(que);
3231 }
3232
3233 /************************************************************************
3234 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3235 ************************************************************************/
3236 static int
3237 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
3238 {
3239 device_t dev = adapter->dev;
3240 struct ix_queue *que = adapter->queues;
3241 struct tx_ring *txr = adapter->tx_rings;
3242 int error, msix_ctrl, rid, vector = 0;
3243 pci_chipset_tag_t pc;
3244 pcitag_t tag;
3245 char intrbuf[PCI_INTRSTR_LEN];
3246 char wqname[MAXCOMLEN];
3247 char intr_xname[32];
3248 const char *intrstr = NULL;
3249 kcpuset_t *affinity;
3250 int cpu_id = 0;
3251
3252 pc = adapter->osdep.pc;
3253 tag = adapter->osdep.tag;
3254
3255 adapter->osdep.nintrs = adapter->num_queues + 1;
3256 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3257 adapter->osdep.nintrs) != 0) {
3258 aprint_error_dev(dev,
3259 "failed to allocate MSI-X interrupt\n");
3260 return (ENXIO);
3261 }
3262
3263 kcpuset_create(&affinity, false);
3264 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3265 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3266 device_xname(dev), i);
3267 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3268 sizeof(intrbuf));
3269 #ifdef IXGBE_MPSAFE
3270 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3271 true);
3272 #endif
3273 /* Set the handler function */
3274 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3275 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3276 intr_xname);
3277 if (que->res == NULL) {
3278 pci_intr_release(pc, adapter->osdep.intrs,
3279 adapter->osdep.nintrs);
3280 aprint_error_dev(dev,
3281 "Failed to register QUE handler\n");
3282 kcpuset_destroy(affinity);
3283 return (ENXIO);
3284 }
3285 que->msix = vector;
3286 adapter->active_queues |= (u64)(1 << que->msix);
3287
3288 cpu_id = i;
3289 /* Round-robin affinity */
3290 kcpuset_zero(affinity);
3291 kcpuset_set(affinity, cpu_id % ncpu);
3292 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3293 NULL);
3294 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3295 intrstr);
3296 if (error == 0)
3297 aprint_normal(", bound queue %d to cpu %d\n",
3298 i, cpu_id % ncpu);
3299 else
3300 aprint_normal("\n");
3301
3302 #ifndef IXGBE_LEGACY_TX
3303 txr->txr_si
3304 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3305 ixgbe_deferred_mq_start, txr);
3306 #endif
3307 que->que_si
3308 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3309 ixv_handle_que, que);
3310 if (que->que_si == NULL) {
3311 aprint_error_dev(dev,
3312 "could not establish software interrupt\n");
3313 }
3314 }
3315 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3316 error = workqueue_create(&adapter->txr_wq, wqname,
3317 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3318 IXGBE_WORKQUEUE_FLAGS);
3319 if (error) {
3320 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
3321 }
3322 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3323
3324 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3325 error = workqueue_create(&adapter->que_wq, wqname,
3326 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3327 IXGBE_WORKQUEUE_FLAGS);
3328 if (error) {
3329 aprint_error_dev(dev,
3330 "couldn't create workqueue\n");
3331 }
3332
3333 /* and Mailbox */
3334 cpu_id++;
3335 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3336 adapter->vector = vector;
3337 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3338 sizeof(intrbuf));
3339 #ifdef IXGBE_MPSAFE
3340 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3341 true);
3342 #endif
3343 /* Set the mbx handler function */
3344 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3345 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3346 intr_xname);
3347 if (adapter->osdep.ihs[vector] == NULL) {
3348 aprint_error_dev(dev, "Failed to register LINK handler\n");
3349 kcpuset_destroy(affinity);
3350 return (ENXIO);
3351 }
3352 /* Round-robin affinity */
3353 kcpuset_zero(affinity);
3354 kcpuset_set(affinity, cpu_id % ncpu);
3355 error = interrupt_distribute(adapter->osdep.ihs[vector],
3356 affinity, NULL);
3357
3358 aprint_normal_dev(dev,
3359 "for link, interrupting at %s", intrstr);
3360 if (error == 0)
3361 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3362 else
3363 aprint_normal("\n");
3364
3365 /* Tasklets for Mailbox */
3366 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
3367 ixv_handle_link, adapter);
3368 /*
3369 * Due to a broken design QEMU will fail to properly
3370 * enable the guest for MSI-X unless the vectors in
3371 * the table are all set up, so we must rewrite the
3372 * ENABLE in the MSI-X control register again at this
3373 * point to cause it to successfully initialize us.
3374 */
3375 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3376 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3377 rid += PCI_MSIX_CTL;
3378 msix_ctrl = pci_conf_read(pc, tag, rid);
3379 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3380 pci_conf_write(pc, tag, rid, msix_ctrl);
3381 }
3382
3383 kcpuset_destroy(affinity);
3384 return (0);
3385 } /* ixv_allocate_msix */
3386
3387 /************************************************************************
3388 * ixv_configure_interrupts - Setup MSI-X resources
3389 *
3390 * Note: The VF device MUST use MSI-X, there is no fallback.
3391 ************************************************************************/
3392 static int
3393 ixv_configure_interrupts(struct adapter *adapter)
3394 {
3395 device_t dev = adapter->dev;
3396 int want, queues, msgs;
3397
3398 /* Must have at least 2 MSI-X vectors */
3399 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3400 if (msgs < 2) {
3401 aprint_error_dev(dev, "MSIX config error\n");
3402 return (ENXIO);
3403 }
3404 msgs = MIN(msgs, IXG_MAX_NINTR);
3405
3406 /* Figure out a reasonable auto config value */
3407 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3408
3409 if (ixv_num_queues != 0)
3410 queues = ixv_num_queues;
3411 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3412 queues = IXGBE_VF_MAX_TX_QUEUES;
3413
3414 /*
3415 * Want vectors for the queues,
3416 * plus an additional for mailbox.
3417 */
3418 want = queues + 1;
3419 if (msgs >= want)
3420 msgs = want;
3421 else {
3422 aprint_error_dev(dev,
3423 "MSI-X Configuration Problem, "
3424 "%d vectors but %d queues wanted!\n",
3425 msgs, want);
3426 return -1;
3427 }
3428
3429 adapter->msix_mem = (void *)1; /* XXX */
3430 aprint_normal_dev(dev,
3431 "Using MSI-X interrupts with %d vectors\n", msgs);
3432 adapter->num_queues = queues;
3433
3434 return (0);
3435 } /* ixv_configure_interrupts */
3436
3437
3438 /************************************************************************
3439 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3440 *
3441 * Done outside of interrupt context since the driver might sleep
3442 ************************************************************************/
3443 static void
3444 ixv_handle_link(void *context)
3445 {
3446 struct adapter *adapter = context;
3447
3448 IXGBE_CORE_LOCK(adapter);
3449
3450 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3451 &adapter->link_up, FALSE);
3452 ixv_update_link_status(adapter);
3453
3454 IXGBE_CORE_UNLOCK(adapter);
3455 } /* ixv_handle_link */
3456
3457 /************************************************************************
3458 * ixv_check_link - Used in the local timer to poll for link changes
3459 ************************************************************************/
3460 static s32
3461 ixv_check_link(struct adapter *adapter)
3462 {
3463 s32 error;
3464
3465 KASSERT(mutex_owned(&adapter->core_mtx));
3466
3467 adapter->hw.mac.get_link_status = TRUE;
3468
3469 error = adapter->hw.mac.ops.check_link(&adapter->hw,
3470 &adapter->link_speed, &adapter->link_up, FALSE);
3471 ixv_update_link_status(adapter);
3472
3473 return error;
3474 } /* ixv_check_link */
3475