ixv.c revision 1.126 1 /*$NetBSD: ixv.c,v 1.126 2019/08/20 04:11:22 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #ifdef _KERNEL_OPT
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_net_mpsafe.h"
41 #endif
42
43 #include "ixgbe.h"
44 #include "vlan.h"
45
46 /************************************************************************
47 * Driver version
48 ************************************************************************/
49 static const char ixv_driver_version[] = "2.0.1-k";
50 /* XXX NetBSD: + 1.5.17 */
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
61 static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
62 {
/* Every entry uses subvendor/subdevice 0 (wildcard) and string index 0. */
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */
69 {0, 0, 0, 0, 0}
70 };
71
72 /************************************************************************
73 * Table of branding strings
74 ************************************************************************/
/* Single branding string: all table entries above reference index 0. */
75 static const char *ixv_strings[] = {
76 "Intel(R) PRO/10GbE Virtual Function Network Driver"
77 };
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static s32 ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121 static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
122
123 static void ixv_setup_vlan_tagging(struct adapter *);
124 static int ixv_setup_vlan_support(struct adapter *);
125 static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
126 static int ixv_register_vlan(void *, struct ifnet *, u16);
127 static int ixv_unregister_vlan(void *, struct ifnet *, u16);
128
129 static void ixv_add_device_sysctls(struct adapter *);
130 static void ixv_save_stats(struct adapter *);
131 static void ixv_init_stats(struct adapter *);
132 static void ixv_update_stats(struct adapter *);
133 static void ixv_add_stats_sysctls(struct adapter *);
134
135 /* Sysctl handlers */
136 static void ixv_set_sysctl_value(struct adapter *, const char *,
137 const char *, int *, int);
138 static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
139 static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
140 static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
141 static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
142 static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
143 static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
144
145 /* The MSI-X Interrupt handlers */
146 static int ixv_msix_que(void *);
147 static int ixv_msix_mbx(void *);
148
149 /* Deferred interrupt tasklets */
150 static void ixv_handle_que(void *);
151 static void ixv_handle_link(void *);
152
153 /* Workqueue handler for deferred work */
154 static void ixv_handle_que_work(struct work *, void *);
155
156 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
157 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
158
159 /************************************************************************
160 * FreeBSD Device Interface Entry Points
161 ************************************************************************/
/* NetBSD autoconf attachment glue: probe/attach/detach entry points. */
162 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
163 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
164 DVF_DETACH_SHUTDOWN);
165
/* FreeBSD newbus registration, disabled on NetBSD (CFATTACH above is used). */
166 #if 0
167 static driver_t ixv_driver = {
168 "ixv", ixv_methods, sizeof(struct adapter),
169 };
170
171 devclass_t ixv_devclass;
172 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
173 MODULE_DEPEND(ixv, pci, 1, 1, 1);
174 MODULE_DEPEND(ixv, ether, 1, 1, 1);
175 #endif
176
177 /*
178 * TUNEABLE PARAMETERS:
179 */
180
181 /* Number of Queues - do not exceed MSI-X vectors - 1 */
182 static int ixv_num_queues = 0;
/*
 * FreeBSD compatibility shim: TUNABLE_INT expands to nothing on NetBSD,
 * so the initializers above/below are compile-time defaults only.
 */
183 #define TUNABLE_INT(__x, __y)
184 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
185
186 /*
187 * AIM: Adaptive Interrupt Moderation
188 * which means that the interrupt rate
189 * is varied over time based on the
190 * traffic for that interrupt vector
191 */
192 static bool ixv_enable_aim = false;
193 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
194
195 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
196 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
197
198 /* How many packets rxeof tries to clean at a time */
199 static int ixv_rx_process_limit = 256;
200 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201
202 /* How many packets txeof tries to clean at a time */
203 static int ixv_tx_process_limit = 256;
204 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
205
206 /* Which packet processing uses workqueue or softint */
207 static bool ixv_txrx_workqueue = false;
208
209 /*
210 * Number of TX descriptors per ring,
211 * setting higher than RX as this seems
212 * the better performing choice.
213 */
214 static int ixv_txd = PERFORM_TXD;
215 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
216
217 /* Number of RX descriptors per ring */
218 static int ixv_rxd = PERFORM_RXD;
219 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
220
221 /* Legacy Transmit (single queue) */
222 static int ixv_enable_legacy_tx = 0;
223 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
224
225 #ifdef NET_MPSAFE
226 #define IXGBE_MPSAFE 1
227 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
/* NOTE(review): "SOFTINFT" looks like a typo for SOFTINT; the name must match
 * its users elsewhere in the file -- verify before renaming. */
228 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
229 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
230 #else
231 #define IXGBE_CALLOUT_FLAGS 0
232 #define IXGBE_SOFTINFT_FLAGS 0
233 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
234 #endif
235 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
236
237 #if 0
238 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
239 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
240 #endif
241
242 /************************************************************************
243 * ixv_probe - Device identification routine
244 *
245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID.
247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/
250 static int
251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
252 {
253 #ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux;
255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257 #else
258 return 0;
259 #endif
260 } /* ixv_probe */
261
262 static const ixgbe_vendor_info_t *
263 ixv_lookup(const struct pci_attach_args *pa)
264 {
265 const ixgbe_vendor_info_t *ent;
266 pcireg_t subid;
267
268 INIT_DEBUGOUT("ixv_lookup: begin");
269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL;
272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) {
282 return ent;
283 }
284 }
285
286 return NULL;
287 }
288
289 /************************************************************************
290 * ixv_attach - Device initialization routine
291 *
292 * Called when the driver is being loaded.
293 * Identifies the type of hardware, allocates all resources
294 * and initializes the hardware.
295 *
296 * return 0 on success, positive on failure
297 ************************************************************************/
298 static void
299 ixv_attach(device_t parent, device_t dev, void *aux)
300 {
301 struct adapter *adapter;
302 struct ixgbe_hw *hw;
303 int error = 0;
304 pcireg_t id, subid;
305 const ixgbe_vendor_info_t *ent;
306 const struct pci_attach_args *pa = aux;
307 const char *apivstr;
308 const char *str;
309 char buf[256];
310
311 INIT_DEBUGOUT("ixv_attach: begin");
312
313 /*
314 * Make sure BUSMASTER is set, on a VM under
315 * KVM it may not be and will break things.
316 */
317 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
318
319 /* Allocate, clear, and link in our adapter structure */
320 adapter = device_private(dev);
321 adapter->dev = dev;
322 adapter->hw.back = adapter;
323 hw = &adapter->hw;
324
325 adapter->init_locked = ixv_init_locked;
326 adapter->stop_locked = ixv_stop;
327
/* Prefer the 64-bit DMA tag when the platform offers one. */
328 adapter->osdep.pc = pa->pa_pc;
329 adapter->osdep.tag = pa->pa_tag;
330 if (pci_dma64_available(pa))
331 adapter->osdep.dmat = pa->pa_dmat64;
332 else
333 adapter->osdep.dmat = pa->pa_dmat;
334 adapter->osdep.attached = false;
335
/* Probe already matched, so the lookup cannot fail here. */
336 ent = ixv_lookup(pa);
337
338 KASSERT(ent != NULL);
339
340 aprint_normal(": %s, Version - %s\n",
341 ixv_strings[ent->index], ixv_driver_version);
342
343 /* Core Lock Init*/
344 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
345
346 /* Do base PCI setup - map BAR0 */
347 if (ixv_allocate_pci_resources(adapter, pa)) {
348 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
349 error = ENXIO;
350 goto err_out;
351 }
352
353 /* SYSCTL APIs */
354 ixv_add_device_sysctls(adapter);
355
356 /* Set up the timer callout */
357 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
358
359 /* Save off the information about this board */
360 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
361 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
362 hw->vendor_id = PCI_VENDOR(id);
363 hw->device_id = PCI_PRODUCT(id);
364 hw->revision_id =
365 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
366 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
367 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
368
369 /* A subset of set_mac_type */
370 switch (hw->device_id) {
371 case IXGBE_DEV_ID_82599_VF:
372 hw->mac.type = ixgbe_mac_82599_vf;
373 str = "82599 VF";
374 break;
375 case IXGBE_DEV_ID_X540_VF:
376 hw->mac.type = ixgbe_mac_X540_vf;
377 str = "X540 VF";
378 break;
379 case IXGBE_DEV_ID_X550_VF:
380 hw->mac.type = ixgbe_mac_X550_vf;
381 str = "X550 VF";
382 break;
383 case IXGBE_DEV_ID_X550EM_X_VF:
384 hw->mac.type = ixgbe_mac_X550EM_x_vf;
385 str = "X550EM X VF";
386 break;
387 case IXGBE_DEV_ID_X550EM_A_VF:
388 hw->mac.type = ixgbe_mac_X550EM_a_vf;
389 str = "X550EM A VF";
390 break;
391 default:
392 /* Shouldn't get here since probe succeeded */
393 aprint_error_dev(dev, "Unknown device ID!\n");
394 error = ENXIO;
395 goto err_out;
396 break;
397 }
398 aprint_normal_dev(dev, "device %s\n", str);
399
400 ixv_init_device_features(adapter);
401
402 /* Initialize the shared code */
403 error = ixgbe_init_ops_vf(hw);
404 if (error) {
405 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
406 error = EIO;
407 goto err_out;
408 }
409
410 /* Setup the mailbox */
411 ixgbe_init_mbx_params_vf(hw);
412
413 /* Set the right number of segments */
414 adapter->num_segs = IXGBE_82599_SCATTER;
415
416 /* Reset mbox api to 1.0 */
417 error = hw->mac.ops.reset_hw(hw);
418 if (error == IXGBE_ERR_RESET_FAILED)
419 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
420 else if (error)
421 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
422 error);
423 if (error) {
424 error = EIO;
425 goto err_out;
426 }
427
428 error = hw->mac.ops.init_hw(hw);
429 if (error) {
430 aprint_error_dev(dev, "...init_hw() failed!\n");
431 error = EIO;
432 goto err_out;
433 }
434
435 /* Negotiate mailbox API version */
436 error = ixv_negotiate_api(adapter);
437 if (error)
438 aprint_normal_dev(dev,
439 "MBX API negotiation failed during attach!\n");
440 switch (hw->api_version) {
441 case ixgbe_mbox_api_10:
442 apivstr = "1.0";
443 break;
444 case ixgbe_mbox_api_20:
445 apivstr = "2.0";
446 break;
447 case ixgbe_mbox_api_11:
448 apivstr = "1.1";
449 break;
450 case ixgbe_mbox_api_12:
451 apivstr = "1.2";
452 break;
453 case ixgbe_mbox_api_13:
454 apivstr = "1.3";
455 break;
456 default:
457 apivstr = "unknown";
458 break;
459 }
460 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
461
462 /* If no mac address was assigned, make a random one */
463 if (!ixv_check_ether_addr(hw->mac.addr)) {
464 u8 addr[ETHER_ADDR_LEN];
465 uint64_t rndval = cprng_strong64();
466
467 memcpy(addr, &rndval, sizeof(addr));
/* Clear the group (multicast) bit and set the locally-administered bit. */
468 addr[0] &= 0xFE;
469 addr[0] |= 0x02;
470 bcopy(addr, hw->mac.addr, sizeof(addr));
471 }
472
473 /* Register for VLAN events */
474 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
475
476 /* Sysctls for limiting the amount of work done in the taskqueues */
477 ixv_set_sysctl_value(adapter, "rx_processing_limit",
478 "max number of rx packets to process",
479 &adapter->rx_process_limit, ixv_rx_process_limit);
480
481 ixv_set_sysctl_value(adapter, "tx_processing_limit",
482 "max number of tx packets to process",
483 &adapter->tx_process_limit, ixv_tx_process_limit);
484
485 /* Do descriptor calc and sanity checks */
486 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
487 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
488 aprint_error_dev(dev, "TXD config issue, using default!\n");
489 adapter->num_tx_desc = DEFAULT_TXD;
490 } else
491 adapter->num_tx_desc = ixv_txd;
492
493 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
494 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
495 aprint_error_dev(dev, "RXD config issue, using default!\n");
496 adapter->num_rx_desc = DEFAULT_RXD;
497 } else
498 adapter->num_rx_desc = ixv_rxd;
499
500 /* Setup MSI-X */
501 error = ixv_configure_interrupts(adapter);
502 if (error)
503 goto err_out;
504
505 /* Allocate our TX/RX Queues */
506 if (ixgbe_allocate_queues(adapter)) {
507 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
508 error = ENOMEM;
509 goto err_out;
510 }
511
512 /* hw.ix defaults init */
513 adapter->enable_aim = ixv_enable_aim;
514
515 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
516
517 error = ixv_allocate_msix(adapter, pa);
518 if (error) {
519 device_printf(dev, "ixv_allocate_msix() failed!\n");
520 goto err_late;
521 }
522
523 /* Setup OS specific network interface */
524 error = ixv_setup_interface(dev, adapter);
525 if (error != 0) {
526 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
527 goto err_late;
528 }
529
530 /* Do the stats setup */
531 ixv_save_stats(adapter);
532 ixv_init_stats(adapter);
533 ixv_add_stats_sysctls(adapter);
534
535 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
536 ixgbe_netmap_attach(adapter);
537
538 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
539 aprint_verbose_dev(dev, "feature cap %s\n", buf);
540 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
541 aprint_verbose_dev(dev, "feature ena %s\n", buf);
542
543 INIT_DEBUGOUT("ixv_attach: end");
544 adapter->osdep.attached = true;
545
546 return;
547
/* err_late: queues were allocated, so free queue state before PCI teardown. */
548 err_late:
549 ixgbe_free_transmit_structures(adapter);
550 ixgbe_free_receive_structures(adapter);
551 free(adapter->queues, M_DEVBUF);
552 err_out:
553 ixv_free_pci_resources(adapter);
554 IXGBE_CORE_LOCK_DESTROY(adapter);
555
556 return;
557 } /* ixv_attach */
558
559 /************************************************************************
560 * ixv_detach - Device removal routine
561 *
562 * Called when the driver is being removed.
563 * Stops the adapter and deallocates all the resources
564 * that were allocated for driver operation.
565 *
566 * return 0 on success, positive on failure
567 ************************************************************************/
568 static int
569 ixv_detach(device_t dev, int flags)
570 {
571 struct adapter *adapter = device_private(dev);
572 struct ixgbe_hw *hw = &adapter->hw;
573 struct ix_queue *que = adapter->queues;
574 struct tx_ring *txr = adapter->tx_rings;
575 struct rx_ring *rxr = adapter->rx_rings;
576 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
577
578 INIT_DEBUGOUT("ixv_detach: begin");
/* Nothing to undo if attach bailed out early. */
579 if (adapter->osdep.attached == false)
580 return 0;
581
582 /* Stop the interface. Callouts are stopped in it. */
583 ixv_ifstop(adapter->ifp, 1);
584
585 #if NVLAN > 0
586 /* Make sure VLANs are not using driver */
587 if (!VLAN_ATTACHED(&adapter->osdep.ec))
588 ; /* nothing to do: no VLANs */
589 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
590 vlan_ifdetach(adapter->ifp);
591 else {
592 aprint_error_dev(dev, "VLANs in use, detach first\n");
593 return EBUSY;
594 }
595 #endif
596
/* Tear down the per-queue softints (Tx softint only when not legacy-Tx). */
597 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
598 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
599 softint_disestablish(txr->txr_si);
600 softint_disestablish(que->que_si);
601 }
602 if (adapter->txr_wq != NULL)
603 workqueue_destroy(adapter->txr_wq);
604 if (adapter->txr_wq_enqueued != NULL)
605 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
606 if (adapter->que_wq != NULL)
607 workqueue_destroy(adapter->que_wq);
608
609 /* Drain the Mailbox(link) queue */
610 softint_disestablish(adapter->link_si);
611
612 ether_ifdetach(adapter->ifp);
613 callout_halt(&adapter->timer, NULL);
614
615 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
616 netmap_detach(adapter->ifp);
617
618 ixv_free_pci_resources(adapter);
619 #if 0 /* XXX the NetBSD port is probably missing something here */
620 bus_generic_detach(dev);
621 #endif
622 if_detach(adapter->ifp);
623 if_percpuq_destroy(adapter->ipq);
624
625 sysctl_teardown(&adapter->sysctllog);
626 evcnt_detach(&adapter->efbig_tx_dma_setup);
627 evcnt_detach(&adapter->mbuf_defrag_failed);
628 evcnt_detach(&adapter->efbig2_tx_dma_setup);
629 evcnt_detach(&adapter->einval_tx_dma_setup);
630 evcnt_detach(&adapter->other_tx_dma_setup);
631 evcnt_detach(&adapter->eagain_tx_dma_setup);
632 evcnt_detach(&adapter->enomem_tx_dma_setup);
633 evcnt_detach(&adapter->watchdog_events);
634 evcnt_detach(&adapter->tso_err);
635 evcnt_detach(&adapter->link_irq);
636
/* txr was advanced by the softint loop above; rewind it for the stats pass. */
637 txr = adapter->tx_rings;
638 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
639 evcnt_detach(&adapter->queues[i].irqs);
640 evcnt_detach(&adapter->queues[i].handleq);
641 evcnt_detach(&adapter->queues[i].req);
642 evcnt_detach(&txr->no_desc_avail);
643 evcnt_detach(&txr->total_packets);
644 evcnt_detach(&txr->tso_tx);
645 #ifndef IXGBE_LEGACY_TX
646 evcnt_detach(&txr->pcq_drops);
647 #endif
648
649 evcnt_detach(&rxr->rx_packets);
650 evcnt_detach(&rxr->rx_bytes);
651 evcnt_detach(&rxr->rx_copies);
652 evcnt_detach(&rxr->no_jmbuf);
653 evcnt_detach(&rxr->rx_discarded);
654 }
655 evcnt_detach(&stats->ipcs);
656 evcnt_detach(&stats->l4cs);
657 evcnt_detach(&stats->ipcs_bad);
658 evcnt_detach(&stats->l4cs_bad);
659
660 /* Packet Reception Stats */
661 evcnt_detach(&stats->vfgorc);
662 evcnt_detach(&stats->vfgprc);
663 evcnt_detach(&stats->vfmprc);
664
665 /* Packet Transmission Stats */
666 evcnt_detach(&stats->vfgotc);
667 evcnt_detach(&stats->vfgptc);
668
669 /* Mailbox Stats */
670 evcnt_detach(&hw->mbx.stats.msgs_tx);
671 evcnt_detach(&hw->mbx.stats.msgs_rx);
672 evcnt_detach(&hw->mbx.stats.acks);
673 evcnt_detach(&hw->mbx.stats.reqs);
674 evcnt_detach(&hw->mbx.stats.rsts);
675
676 ixgbe_free_transmit_structures(adapter);
677 ixgbe_free_receive_structures(adapter);
678 for (int i = 0; i < adapter->num_queues; i++) {
679 struct ix_queue *lque = &adapter->queues[i];
680 mutex_destroy(&lque->dc_mtx);
681 }
682 free(adapter->queues, M_DEVBUF);
683
684 IXGBE_CORE_LOCK_DESTROY(adapter);
685
686 return (0);
687 } /* ixv_detach */
688
689 /************************************************************************
690 * ixv_init_locked - Init entry point
691 *
692 * Used in two ways: It is used by the stack as an init entry
693 * point in network interface structure. It is also used
694 * by the driver as a hw/sw initialization routine to get
695 * to a consistent state.
696 *
697 * return 0 on success, positive on failure
698 ************************************************************************/
699 static void
700 ixv_init_locked(struct adapter *adapter)
701 {
702 struct ifnet *ifp = adapter->ifp;
703 device_t dev = adapter->dev;
704 struct ixgbe_hw *hw = &adapter->hw;
705 struct ix_queue *que;
706 int error = 0;
707 uint32_t mask;
708 int i;
709
710 INIT_DEBUGOUT("ixv_init_locked: begin");
/* Caller must hold the core lock. */
711 KASSERT(mutex_owned(&adapter->core_mtx));
712 hw->adapter_stopped = FALSE;
713 hw->mac.ops.stop_adapter(hw);
714 callout_stop(&adapter->timer);
/* Reset the per-queue interrupt-disable nesting counters. */
715 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
716 que->disabled_count = 0;
717
718 /* reprogram the RAR[0] in case user changed it. */
719 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
720
721 /* Get the latest mac address, User can use a LAA */
722 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
723 IXGBE_ETH_LENGTH_OF_ADDRESS);
/* NOTE(review): literal enable value 1 here vs IXGBE_RAH_AV above --
 * confirm whether both set_rar calls should use the same constant. */
724 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
725
726 /* Prepare transmit descriptors and buffers */
727 if (ixgbe_setup_transmit_structures(adapter)) {
728 aprint_error_dev(dev, "Could not setup transmit structures\n");
729 ixv_stop(adapter);
730 return;
731 }
732
733 /* Reset VF and renegotiate mailbox API version */
734 hw->mac.ops.reset_hw(hw);
735 hw->mac.ops.start_hw(hw);
736 error = ixv_negotiate_api(adapter);
737 if (error)
738 device_printf(dev,
739 "Mailbox API negotiation failed in init_locked!\n");
740
741 ixv_initialize_transmit_units(adapter);
742
743 /* Setup Multicast table */
744 ixv_set_multi(adapter);
745
746 /*
747 * Determine the correct mbuf pool
748 * for doing jumbo/headersplit
749 */
750 if (ifp->if_mtu > ETHERMTU)
751 adapter->rx_mbuf_sz = MJUMPAGESIZE;
752 else
753 adapter->rx_mbuf_sz = MCLBYTES;
754
755 /* Prepare receive descriptors and buffers */
756 if (ixgbe_setup_receive_structures(adapter)) {
757 device_printf(dev, "Could not setup receive structures\n");
758 ixv_stop(adapter);
759 return;
760 }
761
762 /* Configure RX settings */
763 ixv_initialize_receive_units(adapter);
764
765 #if 0 /* XXX isn't it required? -- msaitoh */
766 /* Set the various hardware offload abilities */
767 ifp->if_hwassist = 0;
768 if (ifp->if_capenable & IFCAP_TSO4)
769 ifp->if_hwassist |= CSUM_TSO;
770 if (ifp->if_capenable & IFCAP_TXCSUM) {
771 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
772 #if __FreeBSD_version >= 800000
773 ifp->if_hwassist |= CSUM_SCTP;
774 #endif
775 }
776 #endif
777
778 /* Set up VLAN offload and filter */
779 ixv_setup_vlan_support(adapter);
780
781 /* Set up MSI-X routing */
782 ixv_configure_ivars(adapter);
783
/* Auto-mask covers the mailbox/link vector plus every queue vector. */
784 /* Set up auto-mask */
785 mask = (1 << adapter->vector);
786 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
787 mask |= (1 << que->msix);
788 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
789
790 /* Set moderation on the Link interrupt */
791 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
792
793 /* Stats init */
794 ixv_init_stats(adapter);
795
796 /* Config/Enable Link */
797 hw->mac.get_link_status = TRUE;
798 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
799 FALSE);
800
801 /* Start watchdog */
802 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
803
804 /* And now turn on interrupts */
805 ixv_enable_intr(adapter);
806
807 /* Update saved flags. See ixgbe_ifflags_cb() */
808 adapter->if_flags = ifp->if_flags;
809 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
810
811 /* Now inform the stack we're ready */
812 ifp->if_flags |= IFF_RUNNING;
813 ifp->if_flags &= ~IFF_OACTIVE;
814
815 return;
816 } /* ixv_init_locked */
817
818 /************************************************************************
819 * ixv_enable_queue
820 ************************************************************************/
821 static inline void
822 ixv_enable_queue(struct adapter *adapter, u32 vector)
823 {
824 struct ixgbe_hw *hw = &adapter->hw;
825 struct ix_queue *que = &adapter->queues[vector];
826 u32 queue = 1UL << vector;
827 u32 mask;
828
829 mutex_enter(&que->dc_mtx);
830 if (que->disabled_count > 0 && --que->disabled_count > 0)
831 goto out;
832
833 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
834 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
835 out:
836 mutex_exit(&que->dc_mtx);
837 } /* ixv_enable_queue */
838
839 /************************************************************************
840 * ixv_disable_queue
841 ************************************************************************/
842 static inline void
843 ixv_disable_queue(struct adapter *adapter, u32 vector)
844 {
845 struct ixgbe_hw *hw = &adapter->hw;
846 struct ix_queue *que = &adapter->queues[vector];
847 u32 queue = 1UL << vector;
848 u32 mask;
849
850 mutex_enter(&que->dc_mtx);
851 if (que->disabled_count++ > 0)
852 goto out;
853
854 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
855 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
856 out:
857 mutex_exit(&que->dc_mtx);
858 } /* ixv_disable_queue */
859
860 #if 0
861 static inline void
862 ixv_rearm_queues(struct adapter *adapter, u64 queues)
863 {
864 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
865 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
866 } /* ixv_rearm_queues */
867 #endif
868
869
870 /************************************************************************
871 * ixv_msix_que - MSI-X Queue Interrupt Service routine
872 ************************************************************************/
873 static int
874 ixv_msix_que(void *arg)
875 {
876 struct ix_queue *que = arg;
877 struct adapter *adapter = que->adapter;
878 struct tx_ring *txr = que->txr;
879 struct rx_ring *rxr = que->rxr;
880 bool more;
881 u32 newitr = 0;
882
/* Mask this vector while servicing; re-enabled below when work is done. */
883 ixv_disable_queue(adapter, que->msix);
884 ++que->irqs.ev_count;
885
886 #ifdef __NetBSD__
887 /* Don't run ixgbe_rxeof in interrupt context */
888 more = true;
889 #else
890 more = ixgbe_rxeof(que);
891 #endif
892
893 IXGBE_TX_LOCK(txr);
894 ixgbe_txeof(txr);
895 IXGBE_TX_UNLOCK(txr);
896
897 /* Do AIM now? */
898
899 if (adapter->enable_aim == false)
900 goto no_calc;
901 /*
902 * Do Adaptive Interrupt Moderation:
903 * - Write out last calculated setting
904 * - Calculate based on average size over
905 * the last interval.
906 */
907 if (que->eitr_setting)
908 ixv_eitr_write(adapter, que->msix, que->eitr_setting);
909
910 que->eitr_setting = 0;
911
912 /* Idle, do nothing */
913 if ((txr->bytes == 0) && (rxr->bytes == 0))
914 goto no_calc;
915
/* newitr starts from the larger of the Tx/Rx average packet sizes. */
916 if ((txr->bytes) && (txr->packets))
917 newitr = txr->bytes/txr->packets;
918 if ((rxr->bytes) && (rxr->packets))
919 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
920 newitr += 24; /* account for hardware frame, crc */
921
922 /* set an upper boundary */
923 newitr = uimin(newitr, 3000);
924
925 /* Be nice to the mid range */
926 if ((newitr > 300) && (newitr < 1200))
927 newitr = (newitr / 3);
928 else
929 newitr = (newitr / 2);
930
931 /*
932 * When RSC is used, ITR interval must be larger than RSC_DELAY.
933 * Currently, we use 2us for RSC_DELAY. The minimum value is always
934 * greater than 2us on 100M (and 10M?(not documented)), but it's not
935 * on 1G and higher.
936 */
937 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
938 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
939 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
940 newitr = IXGBE_MIN_RSC_EITR_10G1G;
941 }
942
943 /* save for next interrupt */
944 que->eitr_setting = newitr;
945
946 /* Reset state */
947 txr->bytes = 0;
948 txr->packets = 0;
949 rxr->bytes = 0;
950 rxr->packets = 0;
951
952 no_calc:
/* If more work remains, defer to the softint; it is expected to re-enable
 * the vector when finished -- TODO confirm in ixv_handle_que. */
953 if (more)
954 softint_schedule(que->que_si);
955 else /* Re-enable this interrupt */
956 ixv_enable_queue(adapter, que->msix);
957
958 return 1;
959 } /* ixv_msix_que */
960
961 /************************************************************************
962 * ixv_msix_mbx
963 ************************************************************************/
964 static int
965 ixv_msix_mbx(void *arg)
966 {
967 struct adapter *adapter = arg;
968 struct ixgbe_hw *hw = &adapter->hw;
969
970 ++adapter->link_irq.ev_count;
971 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
972
973 /* Link status change */
974 hw->mac.get_link_status = TRUE;
975 softint_schedule(adapter->link_si);
976
/* Re-enable the mailbox/link vector (it was auto-masked via VTEIAM). */
977 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
978
979 return 1;
980 } /* ixv_msix_mbx */
981
982 static void
983 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
984 {
985
986 /*
987 * Newer devices than 82598 have VF function, so this function is
988 * simple.
989 */
990 itr |= IXGBE_EITR_CNT_WDIS;
991
992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
993 }
994
995
996 /************************************************************************
997 * ixv_media_status - Media Ioctl callback
998 *
999 * Called whenever the user queries the status of
1000 * the interface using ifconfig.
1001 ************************************************************************/
1002 static void
1003 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1004 {
1005 struct adapter *adapter = ifp->if_softc;
1006
1007 INIT_DEBUGOUT("ixv_media_status: begin");
1008 IXGBE_CORE_LOCK(adapter);
1009 ixv_update_link_status(adapter);
1010
1011 ifmr->ifm_status = IFM_AVALID;
1012 ifmr->ifm_active = IFM_ETHER;
1013
1014 if (adapter->link_active != LINK_STATE_UP) {
1015 ifmr->ifm_active |= IFM_NONE;
1016 IXGBE_CORE_UNLOCK(adapter);
1017 return;
1018 }
1019
1020 ifmr->ifm_status |= IFM_ACTIVE;
1021
1022 switch (adapter->link_speed) {
1023 case IXGBE_LINK_SPEED_10GB_FULL:
1024 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1025 break;
1026 case IXGBE_LINK_SPEED_5GB_FULL:
1027 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1028 break;
1029 case IXGBE_LINK_SPEED_2_5GB_FULL:
1030 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1031 break;
1032 case IXGBE_LINK_SPEED_1GB_FULL:
1033 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1034 break;
1035 case IXGBE_LINK_SPEED_100_FULL:
1036 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1037 break;
1038 case IXGBE_LINK_SPEED_10_FULL:
1039 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1040 break;
1041 }
1042
1043 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1044
1045 IXGBE_CORE_UNLOCK(adapter);
1046 } /* ixv_media_status */
1047
1048 /************************************************************************
1049 * ixv_media_change - Media Ioctl callback
1050 *
1051 * Called when the user changes speed/duplex using
1052 * media/mediopt option with ifconfig.
1053 ************************************************************************/
1054 static int
1055 ixv_media_change(struct ifnet *ifp)
1056 {
1057 struct adapter *adapter = ifp->if_softc;
1058 struct ifmedia *ifm = &adapter->media;
1059
1060 INIT_DEBUGOUT("ixv_media_change: begin");
1061
1062 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1063 return (EINVAL);
1064
1065 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1066 case IFM_AUTO:
1067 break;
1068 default:
1069 device_printf(adapter->dev, "Only auto media type\n");
1070 return (EINVAL);
1071 }
1072
1073 return (0);
1074 } /* ixv_media_change */
1075
1076
1077 /************************************************************************
1078 * ixv_negotiate_api
1079 *
1080 * Negotiate the Mailbox API with the PF;
1081 * start with the most featured API first.
1082 ************************************************************************/
1083 static int
1084 ixv_negotiate_api(struct adapter *adapter)
1085 {
1086 struct ixgbe_hw *hw = &adapter->hw;
1087 int mbx_api[] = { ixgbe_mbox_api_11,
1088 ixgbe_mbox_api_10,
1089 ixgbe_mbox_api_unknown };
1090 int i = 0;
1091
1092 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1093 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1094 return (0);
1095 i++;
1096 }
1097
1098 return (EINVAL);
1099 } /* ixv_negotiate_api */
1100
1101
1102 /************************************************************************
1103 * ixv_set_multi - Multicast Update
1104 *
1105 * Called whenever multicast address list is updated.
1106 ************************************************************************/
1107 static void
1108 ixv_set_multi(struct adapter *adapter)
1109 {
1110 struct ether_multi *enm;
1111 struct ether_multistep step;
1112 struct ethercom *ec = &adapter->osdep.ec;
1113 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1114 u8 *update_ptr;
1115 int mcnt = 0;
1116
1117 KASSERT(mutex_owned(&adapter->core_mtx));
1118 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1119
1120 ETHER_LOCK(ec);
1121 ETHER_FIRST_MULTI(step, ec, enm);
1122 while (enm != NULL) {
1123 bcopy(enm->enm_addrlo,
1124 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1125 IXGBE_ETH_LENGTH_OF_ADDRESS);
1126 mcnt++;
1127 /* XXX This might be required --msaitoh */
1128 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1129 break;
1130 ETHER_NEXT_MULTI(step, enm);
1131 }
1132 ETHER_UNLOCK(ec);
1133
1134 update_ptr = mta;
1135
1136 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1137 ixv_mc_array_itr, TRUE);
1138 } /* ixv_set_multi */
1139
1140 /************************************************************************
1141 * ixv_mc_array_itr
1142 *
1143 * An iterator function needed by the multicast shared code.
1144 * It feeds the shared code routine the addresses in the
1145 * array of ixv_set_multi() one by one.
1146 ************************************************************************/
1147 static u8 *
1148 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1149 {
1150 u8 *addr = *update_ptr;
1151 u8 *newptr;
1152
1153 *vmdq = 0;
1154
1155 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1156 *update_ptr = newptr;
1157
1158 return addr;
1159 } /* ixv_mc_array_itr */
1160
1161 /************************************************************************
1162 * ixv_local_timer - Timer routine
1163 *
1164 * Checks for link status, updates statistics,
1165 * and runs the watchdog check.
1166 ************************************************************************/
1167 static void
1168 ixv_local_timer(void *arg)
1169 {
1170 struct adapter *adapter = arg;
1171
1172 IXGBE_CORE_LOCK(adapter);
1173 ixv_local_timer_locked(adapter);
1174 IXGBE_CORE_UNLOCK(adapter);
1175 }
1176
/*
 * Periodic (1 Hz) housekeeping, called with the core lock held:
 * re-checks link, refreshes statistics, aggregates per-queue TX error
 * counters into the adapter-wide event counters, and runs the TX
 * watchdog. Reschedules itself unless a watchdog reset is triggered.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* On a link change, reinitialize; the init path restarts the timer */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		/*
		 * NOTE(review): this reads que->busy but the threshold
		 * branch below marks que->txr->busy -- confirm the two
		 * fields are kept in sync by the txeof path.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Rearm for the next tick */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1275
1276 /************************************************************************
1277 * ixv_update_link_status - Update OS on link state
1278 *
1279 * Note: Only updates the OS on the cached link state.
1280 * The real check of the hardware only happens with
1281 * a link interrupt.
1282 ************************************************************************/
1283 static void
1284 ixv_update_link_status(struct adapter *adapter)
1285 {
1286 struct ifnet *ifp = adapter->ifp;
1287 device_t dev = adapter->dev;
1288
1289 KASSERT(mutex_owned(&adapter->core_mtx));
1290
1291 if (adapter->link_up) {
1292 if (adapter->link_active != LINK_STATE_UP) {
1293 if (bootverbose) {
1294 const char *bpsmsg;
1295
1296 switch (adapter->link_speed) {
1297 case IXGBE_LINK_SPEED_10GB_FULL:
1298 bpsmsg = "10 Gbps";
1299 break;
1300 case IXGBE_LINK_SPEED_5GB_FULL:
1301 bpsmsg = "5 Gbps";
1302 break;
1303 case IXGBE_LINK_SPEED_2_5GB_FULL:
1304 bpsmsg = "2.5 Gbps";
1305 break;
1306 case IXGBE_LINK_SPEED_1GB_FULL:
1307 bpsmsg = "1 Gbps";
1308 break;
1309 case IXGBE_LINK_SPEED_100_FULL:
1310 bpsmsg = "100 Mbps";
1311 break;
1312 case IXGBE_LINK_SPEED_10_FULL:
1313 bpsmsg = "10 Mbps";
1314 break;
1315 default:
1316 bpsmsg = "unknown speed";
1317 break;
1318 }
1319 device_printf(dev, "Link is up %s %s \n",
1320 bpsmsg, "Full Duplex");
1321 }
1322 adapter->link_active = LINK_STATE_UP;
1323 if_link_state_change(ifp, LINK_STATE_UP);
1324 }
1325 } else {
1326 /*
1327 * Do it when link active changes to DOWN. i.e.
1328 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
1329 * b) LINK_STATE_UP -> LINK_STATE_DOWN
1330 */
1331 if (adapter->link_active != LINK_STATE_DOWN) {
1332 if (bootverbose)
1333 device_printf(dev, "Link is Down\n");
1334 if_link_state_change(ifp, LINK_STATE_DOWN);
1335 adapter->link_active = LINK_STATE_DOWN;
1336 }
1337 }
1338 } /* ixv_update_link_status */
1339
1340
1341 /************************************************************************
1342 * ixv_stop - Stop the hardware
1343 *
1344 * Disables all traffic on the adapter by issuing a
1345 * global reset on the MAC and deallocates TX/RX buffers.
1346 ************************************************************************/
1347 static void
1348 ixv_ifstop(struct ifnet *ifp, int disable)
1349 {
1350 struct adapter *adapter = ifp->if_softc;
1351
1352 IXGBE_CORE_LOCK(adapter);
1353 ixv_stop(adapter);
1354 IXGBE_CORE_UNLOCK(adapter);
1355 }
1356
/*
 * Stop the adapter: disable interrupts, mark the interface not
 * running, reset and stop the MAC, and cancel the watchdog timer.
 * Caller must hold the core lock.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped so the subsequent stop_adapter call
	 * actually runs (it is a no-op when the flag is already set).
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1384
1385
1386 /************************************************************************
1387 * ixv_allocate_pci_resources
1388 ************************************************************************/
1389 static int
1390 ixv_allocate_pci_resources(struct adapter *adapter,
1391 const struct pci_attach_args *pa)
1392 {
1393 pcireg_t memtype, csr;
1394 device_t dev = adapter->dev;
1395 bus_addr_t addr;
1396 int flags;
1397
1398 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1399 switch (memtype) {
1400 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1401 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1402 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1403 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1404 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1405 goto map_err;
1406 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1407 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1408 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1409 }
1410 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1411 adapter->osdep.mem_size, flags,
1412 &adapter->osdep.mem_bus_space_handle) != 0) {
1413 map_err:
1414 adapter->osdep.mem_size = 0;
1415 aprint_error_dev(dev, "unable to map BAR0\n");
1416 return ENXIO;
1417 }
1418 /*
1419 * Enable address decoding for memory range in case it's not
1420 * set.
1421 */
1422 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
1423 PCI_COMMAND_STATUS_REG);
1424 csr |= PCI_COMMAND_MEM_ENABLE;
1425 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1426 csr);
1427 break;
1428 default:
1429 aprint_error_dev(dev, "unexpected type on BAR0\n");
1430 return ENXIO;
1431 }
1432
1433 /* Pick up the tuneable queues */
1434 adapter->num_queues = ixv_num_queues;
1435
1436 return (0);
1437 } /* ixv_allocate_pci_resources */
1438
1439 /************************************************************************
1440 * ixv_free_pci_resources
1441 ************************************************************************/
1442 static void
1443 ixv_free_pci_resources(struct adapter * adapter)
1444 {
1445 struct ix_queue *que = adapter->queues;
1446 int rid;
1447
1448 /*
1449 * Release all msix queue resources:
1450 */
1451 for (int i = 0; i < adapter->num_queues; i++, que++) {
1452 if (que->res != NULL)
1453 pci_intr_disestablish(adapter->osdep.pc,
1454 adapter->osdep.ihs[i]);
1455 }
1456
1457
1458 /* Clean the Mailbox interrupt last */
1459 rid = adapter->vector;
1460
1461 if (adapter->osdep.ihs[rid] != NULL) {
1462 pci_intr_disestablish(adapter->osdep.pc,
1463 adapter->osdep.ihs[rid]);
1464 adapter->osdep.ihs[rid] = NULL;
1465 }
1466
1467 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1468 adapter->osdep.nintrs);
1469
1470 if (adapter->osdep.mem_size != 0) {
1471 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1472 adapter->osdep.mem_bus_space_handle,
1473 adapter->osdep.mem_size);
1474 }
1475
1476 return;
1477 } /* ixv_free_pci_resources */
1478
1479 /************************************************************************
1480 * ixv_setup_interface
1481 *
1482 * Setup networking device structure and register an interface.
1483 ************************************************************************/
1484 static int
1485 ixv_setup_interface(device_t dev, struct adapter *adapter)
1486 {
1487 struct ethercom *ec = &adapter->osdep.ec;
1488 struct ifnet *ifp;
1489 int rv;
1490
1491 INIT_DEBUGOUT("ixv_setup_interface: begin");
1492
1493 ifp = adapter->ifp = &ec->ec_if;
1494 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1495 ifp->if_baudrate = IF_Gbps(10);
1496 ifp->if_init = ixv_init;
1497 ifp->if_stop = ixv_ifstop;
1498 ifp->if_softc = adapter;
1499 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1500 #ifdef IXGBE_MPSAFE
1501 ifp->if_extflags = IFEF_MPSAFE;
1502 #endif
1503 ifp->if_ioctl = ixv_ioctl;
1504 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1505 #if 0
1506 ixv_start_locked = ixgbe_legacy_start_locked;
1507 #endif
1508 } else {
1509 ifp->if_transmit = ixgbe_mq_start;
1510 #if 0
1511 ixv_start_locked = ixgbe_mq_start_locked;
1512 #endif
1513 }
1514 ifp->if_start = ixgbe_legacy_start;
1515 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1516 IFQ_SET_READY(&ifp->if_snd);
1517
1518 rv = if_initialize(ifp);
1519 if (rv != 0) {
1520 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1521 return rv;
1522 }
1523 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1524 ether_ifattach(ifp, adapter->hw.mac.addr);
1525 /*
1526 * We use per TX queue softint, so if_deferred_start_init() isn't
1527 * used.
1528 */
1529 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1530
1531 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1532
1533 /*
1534 * Tell the upper layer(s) we support long frames.
1535 */
1536 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1537
1538 /* Set capability flags */
1539 ifp->if_capabilities |= IFCAP_HWCSUM
1540 | IFCAP_TSOv4
1541 | IFCAP_TSOv6;
1542 ifp->if_capenable = 0;
1543
1544 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
1545 | ETHERCAP_VLAN_HWTAGGING
1546 | ETHERCAP_VLAN_HWCSUM
1547 | ETHERCAP_JUMBO_MTU
1548 | ETHERCAP_VLAN_MTU;
1549
1550 /* Enable the above capabilities by default */
1551 ec->ec_capenable = ec->ec_capabilities;
1552
1553 /* Don't enable LRO by default */
1554 #if 0
1555 /* NetBSD doesn't support LRO yet */
1556 ifp->if_capabilities |= IFCAP_LRO;
1557 #endif
1558
1559 /*
1560 * Specify the media types supported by this adapter and register
1561 * callbacks to update media and link information
1562 */
1563 ec->ec_ifmedia = &adapter->media;
1564 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1565 ixv_media_status);
1566 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1567 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1568
1569 if_register(ifp);
1570
1571 return 0;
1572 } /* ixv_setup_interface */
1573
1574
1575 /************************************************************************
1576 * ixv_initialize_transmit_units - Enable transmit unit.
1577 ************************************************************************/
1578 static void
1579 ixv_initialize_transmit_units(struct adapter *adapter)
1580 {
1581 struct tx_ring *txr = adapter->tx_rings;
1582 struct ixgbe_hw *hw = &adapter->hw;
1583 int i;
1584
1585 for (i = 0; i < adapter->num_queues; i++, txr++) {
1586 u64 tdba = txr->txdma.dma_paddr;
1587 u32 txctrl, txdctl;
1588 int j = txr->me;
1589
1590 /* Set WTHRESH to 8, burst writeback */
1591 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1592 txdctl |= (8 << 16);
1593 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1594
1595 /* Set the HW Tx Head and Tail indices */
1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1598
1599 /* Set Tx Tail register */
1600 txr->tail = IXGBE_VFTDT(j);
1601
1602 txr->txr_no_space = false;
1603
1604 /* Set Ring parameters */
1605 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1606 (tdba & 0x00000000ffffffffULL));
1607 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1608 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1609 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1610 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1611 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1612 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1613
1614 /* Now enable */
1615 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1616 txdctl |= IXGBE_TXDCTL_ENABLE;
1617 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1618 }
1619
1620 return;
1621 } /* ixv_initialize_transmit_units */
1622
1623
1624 /************************************************************************
1625 * ixv_initialize_rss_mapping
1626 ************************************************************************/
1627 static void
1628 ixv_initialize_rss_mapping(struct adapter *adapter)
1629 {
1630 struct ixgbe_hw *hw = &adapter->hw;
1631 u32 reta = 0, mrqc, rss_key[10];
1632 int queue_id;
1633 int i, j;
1634 u32 rss_hash_config;
1635
1636 /* force use default RSS key. */
1637 #ifdef __NetBSD__
1638 rss_getkey((uint8_t *) &rss_key);
1639 #else
1640 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1641 /* Fetch the configured RSS key */
1642 rss_getkey((uint8_t *)&rss_key);
1643 } else {
1644 /* set up random bits */
1645 cprng_fast(&rss_key, sizeof(rss_key));
1646 }
1647 #endif
1648
1649 /* Now fill out hash function seeds */
1650 for (i = 0; i < 10; i++)
1651 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1652
1653 /* Set up the redirection table */
1654 for (i = 0, j = 0; i < 64; i++, j++) {
1655 if (j == adapter->num_queues)
1656 j = 0;
1657
1658 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1659 /*
1660 * Fetch the RSS bucket id for the given indirection
1661 * entry. Cap it at the number of configured buckets
1662 * (which is num_queues.)
1663 */
1664 queue_id = rss_get_indirection_to_bucket(i);
1665 queue_id = queue_id % adapter->num_queues;
1666 } else
1667 queue_id = j;
1668
1669 /*
1670 * The low 8 bits are for hash value (n+0);
1671 * The next 8 bits are for hash value (n+1), etc.
1672 */
1673 reta >>= 8;
1674 reta |= ((uint32_t)queue_id) << 24;
1675 if ((i & 3) == 3) {
1676 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1677 reta = 0;
1678 }
1679 }
1680
1681 /* Perform hash on these packet types */
1682 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1683 rss_hash_config = rss_gethashconfig();
1684 else {
1685 /*
1686 * Disable UDP - IP fragments aren't currently being handled
1687 * and so we end up with a mix of 2-tuple and 4-tuple
1688 * traffic.
1689 */
1690 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1691 | RSS_HASHTYPE_RSS_TCP_IPV4
1692 | RSS_HASHTYPE_RSS_IPV6
1693 | RSS_HASHTYPE_RSS_TCP_IPV6;
1694 }
1695
1696 mrqc = IXGBE_MRQC_RSSEN;
1697 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1698 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1699 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1700 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1701 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1702 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1703 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1704 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1705 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1706 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1707 __func__);
1708 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1709 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1710 __func__);
1711 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1712 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1713 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1714 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1715 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1716 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1717 __func__);
1718 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1719 } /* ixv_initialize_rss_mapping */
1720
1721
1722 /************************************************************************
1723 * ixv_initialize_receive_units - Setup receive registers and features.
1724 ************************************************************************/
1725 static void
1726 ixv_initialize_receive_units(struct adapter *adapter)
1727 {
1728 struct rx_ring *rxr = adapter->rx_rings;
1729 struct ixgbe_hw *hw = &adapter->hw;
1730 struct ifnet *ifp = adapter->ifp;
1731 u32 bufsz, psrtype;
1732
1733 if (ifp->if_mtu > ETHERMTU)
1734 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1735 else
1736 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1737
1738 psrtype = IXGBE_PSRTYPE_TCPHDR
1739 | IXGBE_PSRTYPE_UDPHDR
1740 | IXGBE_PSRTYPE_IPV4HDR
1741 | IXGBE_PSRTYPE_IPV6HDR
1742 | IXGBE_PSRTYPE_L2HDR;
1743
1744 if (adapter->num_queues > 1)
1745 psrtype |= 1 << 29;
1746
1747 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1748
1749 /* Tell PF our max_frame size */
1750 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1751 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1752 }
1753
1754 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1755 u64 rdba = rxr->rxdma.dma_paddr;
1756 u32 reg, rxdctl;
1757 int j = rxr->me;
1758
1759 /* Disable the queue */
1760 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1761 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1762 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1763 for (int k = 0; k < 10; k++) {
1764 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1765 IXGBE_RXDCTL_ENABLE)
1766 msec_delay(1);
1767 else
1768 break;
1769 }
1770 wmb();
1771 /* Setup the Base and Length of the Rx Descriptor Ring */
1772 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1773 (rdba & 0x00000000ffffffffULL));
1774 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1775 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1776 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1777
1778 /* Reset the ring indices */
1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1780 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1781
1782 /* Set up the SRRCTL register */
1783 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1784 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1785 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1786 reg |= bufsz;
1787 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1788 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1789
1790 /* Capture Rx Tail index */
1791 rxr->tail = IXGBE_VFRDT(rxr->me);
1792
1793 /* Do the queue enabling last */
1794 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1795 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1796 for (int k = 0; k < 10; k++) {
1797 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1798 IXGBE_RXDCTL_ENABLE)
1799 break;
1800 msec_delay(1);
1801 }
1802 wmb();
1803
1804 /* Set the Tail Pointer */
1805 #ifdef DEV_NETMAP
1806 /*
1807 * In netmap mode, we must preserve the buffers made
1808 * available to userspace before the if_init()
1809 * (this is true by default on the TX side, because
1810 * init makes all buffers available to userspace).
1811 *
1812 * netmap_reset() and the device specific routines
1813 * (e.g. ixgbe_setup_receive_rings()) map these
1814 * buffers at the end of the NIC ring, so here we
1815 * must set the RDT (tail) register to make sure
1816 * they are not overwritten.
1817 *
1818 * In this driver the NIC ring starts at RDH = 0,
1819 * RDT points to the last slot available for reception (?),
1820 * so RDT = num_rx_desc - 1 means the whole ring is available.
1821 */
1822 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1823 (ifp->if_capenable & IFCAP_NETMAP)) {
1824 struct netmap_adapter *na = NA(adapter->ifp);
1825 struct netmap_kring *kring = na->rx_rings[i];
1826 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1827
1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1829 } else
1830 #endif /* DEV_NETMAP */
1831 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1832 adapter->num_rx_desc - 1);
1833 }
1834
1835 ixv_initialize_rss_mapping(adapter);
1836 } /* ixv_initialize_receive_units */
1837
1838 /************************************************************************
1839 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1840 *
1841 * Retrieves the TDH value from the hardware
1842 ************************************************************************/
1843 static int
1844 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1845 {
1846 struct sysctlnode node = *rnode;
1847 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1848 uint32_t val;
1849
1850 if (!txr)
1851 return (0);
1852
1853 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1854 node.sysctl_data = &val;
1855 return sysctl_lookup(SYSCTLFN_CALL(&node));
1856 } /* ixv_sysctl_tdh_handler */
1857
1858 /************************************************************************
1859 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1860 *
1861 * Retrieves the TDT value from the hardware
1862 ************************************************************************/
1863 static int
1864 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1865 {
1866 struct sysctlnode node = *rnode;
1867 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1868 uint32_t val;
1869
1870 if (!txr)
1871 return (0);
1872
1873 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1874 node.sysctl_data = &val;
1875 return sysctl_lookup(SYSCTLFN_CALL(&node));
1876 } /* ixv_sysctl_tdt_handler */
1877
1878 /************************************************************************
1879 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1880 * handler function
1881 *
1882 * Retrieves the next_to_check value
1883 ************************************************************************/
1884 static int
1885 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1886 {
1887 struct sysctlnode node = *rnode;
1888 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1889 uint32_t val;
1890
1891 if (!rxr)
1892 return (0);
1893
1894 val = rxr->next_to_check;
1895 node.sysctl_data = &val;
1896 return sysctl_lookup(SYSCTLFN_CALL(&node));
1897 } /* ixv_sysctl_next_to_check_handler */
1898
1899 /************************************************************************
1900 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1901 *
1902 * Retrieves the RDH value from the hardware
1903 ************************************************************************/
1904 static int
1905 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1906 {
1907 struct sysctlnode node = *rnode;
1908 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1909 uint32_t val;
1910
1911 if (!rxr)
1912 return (0);
1913
1914 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1915 node.sysctl_data = &val;
1916 return sysctl_lookup(SYSCTLFN_CALL(&node));
1917 } /* ixv_sysctl_rdh_handler */
1918
1919 /************************************************************************
1920 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1921 *
1922 * Retrieves the RDT value from the hardware
1923 ************************************************************************/
1924 static int
1925 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1926 {
1927 struct sysctlnode node = *rnode;
1928 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1929 uint32_t val;
1930
1931 if (!rxr)
1932 return (0);
1933
1934 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1935 node.sysctl_data = &val;
1936 return sysctl_lookup(SYSCTLFN_CALL(&node));
1937 } /* ixv_sysctl_rdt_handler */
1938
1939 static void
1940 ixv_setup_vlan_tagging(struct adapter *adapter)
1941 {
1942 struct ethercom *ec = &adapter->osdep.ec;
1943 struct ixgbe_hw *hw = &adapter->hw;
1944 struct rx_ring *rxr;
1945 u32 ctrl;
1946 int i;
1947 bool hwtagging;
1948
1949 /* Enable HW tagging only if any vlan is attached */
1950 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1951 && VLAN_ATTACHED(ec);
1952
1953 /* Enable the queues */
1954 for (i = 0; i < adapter->num_queues; i++) {
1955 rxr = &adapter->rx_rings[i];
1956 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
1957 if (hwtagging)
1958 ctrl |= IXGBE_RXDCTL_VME;
1959 else
1960 ctrl &= ~IXGBE_RXDCTL_VME;
1961 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
1962 /*
1963 * Let Rx path know that it needs to store VLAN tag
1964 * as part of extra mbuf info.
1965 */
1966 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
1967 }
1968 } /* ixv_setup_vlan_tagging */
1969
1970 /************************************************************************
1971 * ixv_setup_vlan_support
1972 ************************************************************************/
1973 static int
1974 ixv_setup_vlan_support(struct adapter *adapter)
1975 {
1976 struct ethercom *ec = &adapter->osdep.ec;
1977 struct ixgbe_hw *hw = &adapter->hw;
1978 u32 vid, vfta, retry;
1979 struct vlanid_list *vlanidp;
1980 int rv, error = 0;
1981
1982 /*
1983 * This function is called from both if_init and ifflags_cb()
1984 * on NetBSD.
1985 */
1986
1987 /*
1988 * Part 1:
1989 * Setup VLAN HW tagging
1990 */
1991 ixv_setup_vlan_tagging(adapter);
1992
1993 if (!VLAN_ATTACHED(ec))
1994 return 0;
1995
1996 /*
1997 * Part 2:
1998 * Setup VLAN HW filter
1999 */
2000 /* Cleanup shadow_vfta */
2001 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2002 adapter->shadow_vfta[i] = 0;
2003 /* Generate shadow_vfta from ec_vids */
2004 mutex_enter(ec->ec_lock);
2005 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2006 uint32_t idx;
2007
2008 idx = vlanidp->vid / 32;
2009 KASSERT(idx < IXGBE_VFTA_SIZE);
2010 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2011 }
2012 mutex_exit(ec->ec_lock);
2013
2014 /*
2015 * A soft reset zero's out the VFTA, so
2016 * we need to repopulate it now.
2017 */
2018 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2019 if (adapter->shadow_vfta[i] == 0)
2020 continue;
2021 vfta = adapter->shadow_vfta[i];
2022 /*
2023 * Reconstruct the vlan id's
2024 * based on the bits set in each
2025 * of the array ints.
2026 */
2027 for (int j = 0; j < 32; j++) {
2028 retry = 0;
2029 if ((vfta & ((u32)1 << j)) == 0)
2030 continue;
2031 vid = (i * 32) + j;
2032
2033 /* Call the shared code mailbox routine */
2034 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2035 FALSE)) != 0) {
2036 if (++retry > 5) {
2037 device_printf(adapter->dev,
2038 "%s: max retry exceeded\n",
2039 __func__);
2040 break;
2041 }
2042 }
2043 if (rv != 0) {
2044 device_printf(adapter->dev,
2045 "failed to set vlan %d\n", vid);
2046 error = EACCES;
2047 }
2048 }
2049 }
2050 return error;
2051 } /* ixv_setup_vlan_support */
2052
2053 static int
2054 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2055 {
2056 struct ifnet *ifp = &ec->ec_if;
2057 struct adapter *adapter = ifp->if_softc;
2058 int rv;
2059
2060 if (set)
2061 rv = ixv_register_vlan(ifp->if_softc, ifp, vid);
2062 else
2063 rv = ixv_unregister_vlan(ifp->if_softc, ifp, vid);
2064
2065 if (rv != 0)
2066 return rv;
2067
2068 /*
2069 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2070 * or 0 to 1.
2071 */
2072 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2073 ixv_setup_vlan_tagging(adapter);
2074
2075 return rv;
2076 }
2077
2078 /************************************************************************
2079 * ixv_register_vlan
2080 *
2081 * Run via a vlan config EVENT, it enables us to use the
2082 * HW Filter table since we can get the vlan id. This just
2083 * creates the entry in the soft version of the VFTA, init
2084 * will repopulate the real table.
2085 ************************************************************************/
2086 static int
2087 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2088 {
2089 struct adapter *adapter = ifp->if_softc;
2090 struct ixgbe_hw *hw = &adapter->hw;
2091 u16 index, bit;
2092 int error;
2093
2094 if (ifp->if_softc != arg) /* Not our event */
2095 return EINVAL;
2096
2097 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2098 return EINVAL;
2099 IXGBE_CORE_LOCK(adapter);
2100 index = (vtag >> 5) & 0x7F;
2101 bit = vtag & 0x1F;
2102 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2103 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2104 IXGBE_CORE_UNLOCK(adapter);
2105
2106 if (error != 0) {
2107 device_printf(adapter->dev, "failed to register vlan %hu\n",
2108 vtag);
2109 error = EACCES;
2110 }
2111 return error;
2112 } /* ixv_register_vlan */
2113
2114 /************************************************************************
2115 * ixv_unregister_vlan
2116 *
2117 * Run via a vlan unconfig EVENT, remove our entry
2118 * in the soft vfta.
2119 ************************************************************************/
2120 static int
2121 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2122 {
2123 struct adapter *adapter = ifp->if_softc;
2124 struct ixgbe_hw *hw = &adapter->hw;
2125 u16 index, bit;
2126 int error;
2127
2128 if (ifp->if_softc != arg)
2129 return EINVAL;
2130
2131 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2132 return EINVAL;
2133
2134 IXGBE_CORE_LOCK(adapter);
2135 index = (vtag >> 5) & 0x7F;
2136 bit = vtag & 0x1F;
2137 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2138 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2139 IXGBE_CORE_UNLOCK(adapter);
2140
2141 if (error != 0) {
2142 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2143 vtag);
2144 error = EIO;
2145 }
2146 return error;
2147 } /* ixv_unregister_vlan */
2148
2149 /************************************************************************
2150 * ixv_enable_intr
2151 ************************************************************************/
2152 static void
2153 ixv_enable_intr(struct adapter *adapter)
2154 {
2155 struct ixgbe_hw *hw = &adapter->hw;
2156 struct ix_queue *que = adapter->queues;
2157 u32 mask;
2158 int i;
2159
2160 /* For VTEIAC */
2161 mask = (1 << adapter->vector);
2162 for (i = 0; i < adapter->num_queues; i++, que++)
2163 mask |= (1 << que->msix);
2164 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2165
2166 /* For VTEIMS */
2167 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2168 que = adapter->queues;
2169 for (i = 0; i < adapter->num_queues; i++, que++)
2170 ixv_enable_queue(adapter, que->msix);
2171
2172 IXGBE_WRITE_FLUSH(hw);
2173 } /* ixv_enable_intr */
2174
/************************************************************************
 * ixv_disable_intr
 *
 * Mask all interrupt sources and disable auto-clear.
 ************************************************************************/
static void
ixv_disable_intr(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	/* Clear the auto-clear mask entirely. */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);

	/* disable interrupts other than queues */
	/*
	 * NOTE(review): ixv_enable_intr() writes the bitmask
	 * (1 << adapter->vector) into VTEIMS, yet here the raw vector
	 * number is written into VTEIMC.  Verify against the VF
	 * datasheet whether this should also be a bitmask.
	 */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);

	/* Mask each queue interrupt individually. */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_disable_queue(adapter, que->msix);

	/* Flush posted writes so the masking takes effect immediately. */
	IXGBE_WRITE_FLUSH(&adapter->hw);
} /* ixv_disable_intr */
2193
2194 /************************************************************************
2195 * ixv_set_ivar
2196 *
2197 * Setup the correct IVAR register for a particular MSI-X interrupt
2198 * - entry is the register array entry
2199 * - vector is the MSI-X vector for this queue
2200 * - type is RX/TX/MISC
2201 ************************************************************************/
2202 static void
2203 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2204 {
2205 struct ixgbe_hw *hw = &adapter->hw;
2206 u32 ivar, index;
2207
2208 vector |= IXGBE_IVAR_ALLOC_VAL;
2209
2210 if (type == -1) { /* MISC IVAR */
2211 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2212 ivar &= ~0xFF;
2213 ivar |= vector;
2214 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2215 } else { /* RX/TX IVARS */
2216 index = (16 * (entry & 1)) + (8 * type);
2217 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2218 ivar &= ~(0xffUL << index);
2219 ivar |= ((u32)vector << index);
2220 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2221 }
2222 } /* ixv_set_ivar */
2223
2224 /************************************************************************
2225 * ixv_configure_ivars
2226 ************************************************************************/
2227 static void
2228 ixv_configure_ivars(struct adapter *adapter)
2229 {
2230 struct ix_queue *que = adapter->queues;
2231
2232 /* XXX We should sync EITR value calculation with ixgbe.c? */
2233
2234 for (int i = 0; i < adapter->num_queues; i++, que++) {
2235 /* First the RX queue entry */
2236 ixv_set_ivar(adapter, i, que->msix, 0);
2237 /* ... and the TX */
2238 ixv_set_ivar(adapter, i, que->msix, 1);
2239 /* Set an initial value in EITR */
2240 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2241 }
2242
2243 /* For the mailbox interrupt */
2244 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2245 } /* ixv_configure_ivars */
2246
2247
2248 /************************************************************************
2249 * ixv_save_stats
2250 *
2251 * The VF stats registers never have a truly virgin
2252 * starting point, so this routine tries to make an
2253 * artificial one, marking ground zero on attach as
2254 * it were.
2255 ************************************************************************/
2256 static void
2257 ixv_save_stats(struct adapter *adapter)
2258 {
2259 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2260
2261 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2262 stats->saved_reset_vfgprc +=
2263 stats->vfgprc.ev_count - stats->base_vfgprc;
2264 stats->saved_reset_vfgptc +=
2265 stats->vfgptc.ev_count - stats->base_vfgptc;
2266 stats->saved_reset_vfgorc +=
2267 stats->vfgorc.ev_count - stats->base_vfgorc;
2268 stats->saved_reset_vfgotc +=
2269 stats->vfgotc.ev_count - stats->base_vfgotc;
2270 stats->saved_reset_vfmprc +=
2271 stats->vfmprc.ev_count - stats->base_vfmprc;
2272 }
2273 } /* ixv_save_stats */
2274
2275 /************************************************************************
2276 * ixv_init_stats
2277 ************************************************************************/
2278 static void
2279 ixv_init_stats(struct adapter *adapter)
2280 {
2281 struct ixgbe_hw *hw = &adapter->hw;
2282
2283 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2284 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2285 adapter->stats.vf.last_vfgorc |=
2286 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2287
2288 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2289 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2290 adapter->stats.vf.last_vfgotc |=
2291 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2292
2293 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2294
2295 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2296 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2297 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2298 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2299 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2300 } /* ixv_init_stats */
2301
/*
 * UPDATE_STAT_32() - fold a free-running 32-bit HW counter into a
 * 64-bit evcnt.  A new reading smaller than the previous one means
 * the HW register wrapped, so credit a full 2^32.
 *
 * Wrapped in do { } while (0) so the macro is a single statement and
 * is safe inside an unbraced if/else; arguments are parenthesized
 * against operator-precedence surprises.  Requires a local
 * "struct ixgbe_hw *hw" in the calling scope.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)

/*
 * UPDATE_STAT_36() - same idea for the 36-bit octet counters that
 * are split across an LSB/MSB register pair (wrap quantum 2^36).
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2323
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Fold the free-running HW counters into the 64-bit evcnts. */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2348
2349 /************************************************************************
2350 * ixv_sysctl_interrupt_rate_handler
2351 ************************************************************************/
2352 static int
2353 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2354 {
2355 struct sysctlnode node = *rnode;
2356 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2357 struct adapter *adapter = que->adapter;
2358 uint32_t reg, usec, rate;
2359 int error;
2360
2361 if (que == NULL)
2362 return 0;
2363 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2364 usec = ((reg & 0x0FF8) >> 3);
2365 if (usec > 0)
2366 rate = 500000 / usec;
2367 else
2368 rate = 0;
2369 node.sysctl_data = &rate;
2370 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2371 if (error || newp == NULL)
2372 return error;
2373 reg &= ~0xfff; /* default, no limitation */
2374 if (rate > 0 && rate < 500000) {
2375 if (rate < 1000)
2376 rate = 1000;
2377 reg |= ((4000000/rate) & 0xff8);
2378 /*
2379 * When RSC is used, ITR interval must be larger than
2380 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2381 * The minimum value is always greater than 2us on 100M
2382 * (and 10M?(not documented)), but it's not on 1G and higher.
2383 */
2384 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2385 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2386 if ((adapter->num_queues > 1)
2387 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2388 return EINVAL;
2389 }
2390 ixv_max_interrupt_rate = rate;
2391 } else
2392 ixv_max_interrupt_rate = 0;
2393 ixv_eitr_write(adapter, que->msix, reg);
2394
2395 return (0);
2396 } /* ixv_sysctl_interrupt_rate_handler */
2397
2398 const struct sysctlnode *
2399 ixv_sysctl_instance(struct adapter *adapter)
2400 {
2401 const char *dvname;
2402 struct sysctllog **log;
2403 int rc;
2404 const struct sysctlnode *rnode;
2405
2406 log = &adapter->sysctllog;
2407 dvname = device_xname(adapter->dev);
2408
2409 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2410 0, CTLTYPE_NODE, dvname,
2411 SYSCTL_DESCR("ixv information and settings"),
2412 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2413 goto err;
2414
2415 return rnode;
2416 err:
2417 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2418 return NULL;
2419 }
2420
2421 static void
2422 ixv_add_device_sysctls(struct adapter *adapter)
2423 {
2424 struct sysctllog **log;
2425 const struct sysctlnode *rnode, *cnode;
2426 device_t dev;
2427
2428 dev = adapter->dev;
2429 log = &adapter->sysctllog;
2430
2431 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2432 aprint_error_dev(dev, "could not create sysctl root\n");
2433 return;
2434 }
2435
2436 if (sysctl_createv(log, 0, &rnode, &cnode,
2437 CTLFLAG_READWRITE, CTLTYPE_INT,
2438 "debug", SYSCTL_DESCR("Debug Info"),
2439 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2440 aprint_error_dev(dev, "could not create sysctl\n");
2441
2442 if (sysctl_createv(log, 0, &rnode, &cnode,
2443 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2444 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2445 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2446 aprint_error_dev(dev, "could not create sysctl\n");
2447
2448 if (sysctl_createv(log, 0, &rnode, &cnode,
2449 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2450 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2451 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2452 aprint_error_dev(dev, "could not create sysctl\n");
2453 }
2454
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 * Attaches driver-level event counters, per-queue sysctl nodes and
 * counters, MAC statistics, and mailbox statistics.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue sysctl nodes and event counters. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Create the hw.<dev>.qN node; rnode now points at it. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2639
2640 /************************************************************************
2641 * ixv_set_sysctl_value
2642 ************************************************************************/
2643 static void
2644 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2645 const char *description, int *limit, int value)
2646 {
2647 device_t dev = adapter->dev;
2648 struct sysctllog **log;
2649 const struct sysctlnode *rnode, *cnode;
2650
2651 log = &adapter->sysctllog;
2652 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2653 aprint_error_dev(dev, "could not create sysctl root\n");
2654 return;
2655 }
2656 if (sysctl_createv(log, 0, &rnode, &cnode,
2657 CTLFLAG_READWRITE, CTLTYPE_INT,
2658 name, SYSCTL_DESCR(description),
2659 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2660 aprint_error_dev(dev, "could not create sysctl\n");
2661 *limit = value;
2662 } /* ixv_set_sysctl_value */
2663
2664 /************************************************************************
2665 * ixv_print_debug_info
2666 *
2667 * Called only when em_display_debug_stats is enabled.
2668 * Provides a way to take a look at important statistics
2669 * maintained by the driver and hardware.
2670 ************************************************************************/
2671 static void
2672 ixv_print_debug_info(struct adapter *adapter)
2673 {
2674 device_t dev = adapter->dev;
2675 struct ix_queue *que = adapter->queues;
2676 struct rx_ring *rxr;
2677 struct tx_ring *txr;
2678 #ifdef LRO
2679 struct lro_ctrl *lro;
2680 #endif /* LRO */
2681
2682 for (int i = 0; i < adapter->num_queues; i++, que++) {
2683 txr = que->txr;
2684 rxr = que->rxr;
2685 #ifdef LRO
2686 lro = &rxr->lro;
2687 #endif /* LRO */
2688 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2689 que->msix, (long)que->irqs.ev_count);
2690 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2691 rxr->me, (long long)rxr->rx_packets.ev_count);
2692 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2693 rxr->me, (long)rxr->rx_bytes.ev_count);
2694 #ifdef LRO
2695 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2696 rxr->me, (uintmax_t)lro->lro_queued);
2697 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2698 rxr->me, (uintmax_t)lro->lro_flushed);
2699 #endif /* LRO */
2700 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2701 txr->me, (long)txr->total_packets.ev_count);
2702 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2703 txr->me, (long)txr->no_desc_avail.ev_count);
2704 }
2705
2706 device_printf(dev, "MBX IRQ Handled: %lu\n",
2707 (long)adapter->link_irq.ev_count);
2708 } /* ixv_print_debug_info */
2709
2710 /************************************************************************
2711 * ixv_sysctl_debug
2712 ************************************************************************/
2713 static int
2714 ixv_sysctl_debug(SYSCTLFN_ARGS)
2715 {
2716 struct sysctlnode node = *rnode;
2717 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2718 int error, result;
2719
2720 node.sysctl_data = &result;
2721 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2722
2723 if (error || newp == NULL)
2724 return error;
2725
2726 if (result == 1)
2727 ixv_print_debug_info(adapter);
2728
2729 return 0;
2730 } /* ixv_sysctl_debug */
2731
2732 /************************************************************************
2733 * ixv_init_device_features
2734 ************************************************************************/
2735 static void
2736 ixv_init_device_features(struct adapter *adapter)
2737 {
2738 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2739 | IXGBE_FEATURE_VF
2740 | IXGBE_FEATURE_RSS
2741 | IXGBE_FEATURE_LEGACY_TX;
2742
2743 /* A tad short on feature flags for VFs, atm. */
2744 switch (adapter->hw.mac.type) {
2745 case ixgbe_mac_82599_vf:
2746 break;
2747 case ixgbe_mac_X540_vf:
2748 break;
2749 case ixgbe_mac_X550_vf:
2750 case ixgbe_mac_X550EM_x_vf:
2751 case ixgbe_mac_X550EM_a_vf:
2752 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2753 break;
2754 default:
2755 break;
2756 }
2757
2758 /* Enabled by default... */
2759 /* Is a virtual function (VF) */
2760 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2761 adapter->feat_en |= IXGBE_FEATURE_VF;
2762 /* Netmap */
2763 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2764 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2765 /* Receive-Side Scaling (RSS) */
2766 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2767 adapter->feat_en |= IXGBE_FEATURE_RSS;
2768 /* Needs advanced context descriptor regardless of offloads req'd */
2769 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2770 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2771
2772 /* Enabled via sysctl... */
2773 /* Legacy (single queue) transmit */
2774 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2775 ixv_enable_legacy_tx)
2776 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2777 } /* ixv_init_device_features */
2778
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 * Currently compiled out; kept as a reference for a future pmf(9)
 * shutdown hook.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	/* Stop the interface under the core lock on system shutdown. */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2794
2795 static int
2796 ixv_ifflags_cb(struct ethercom *ec)
2797 {
2798 struct ifnet *ifp = &ec->ec_if;
2799 struct adapter *adapter = ifp->if_softc;
2800 int change, rv = 0;
2801
2802 IXGBE_CORE_LOCK(adapter);
2803
2804 change = ifp->if_flags ^ adapter->if_flags;
2805 if (change != 0)
2806 adapter->if_flags = ifp->if_flags;
2807
2808 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2809 rv = ENETRESET;
2810 goto out;
2811 }
2812
2813 /* Check for ec_capenable. */
2814 change = ec->ec_capenable ^ adapter->ec_capenable;
2815 adapter->ec_capenable = ec->ec_capenable;
2816 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
2817 | ETHERCAP_VLAN_HWFILTER)) != 0) {
2818 rv = ENETRESET;
2819 goto out;
2820 }
2821
2822 /*
2823 * Special handling is not required for ETHERCAP_VLAN_MTU.
2824 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
2825 */
2826
2827 /* Set up VLAN support and filter */
2828 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
2829 rv = ixv_setup_vlan_support(adapter);
2830
2831 out:
2832 IXGBE_CORE_UNLOCK(adapter);
2833
2834 return rv;
2835 }
2836
2837
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	int error = 0;
	int l4csum_en;
	/* All L4 Rx checksum offload flags; toggled only as a unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing of the incoming request only. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual work. */
	switch (command) {
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* ether_ioctl() handles the generic part; ENETRESET
		 * means the driver must apply the new configuration. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			/* Interface is down: nothing to reprogram. */
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2914
/************************************************************************
 * ixv_init
 *
 * if_init entry point: run the real initialization under the core
 * lock.  Always returns 0.
 ************************************************************************/
static int
ixv_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return 0;
} /* ixv_init */
2929
2930 /************************************************************************
2931 * ixv_handle_que
2932 ************************************************************************/
/*
 * Deferred (softint/workqueue) per-queue service routine: drain completed
 * RX/TX descriptors and restart transmission; reschedules itself while
 * work remains, and re-enables the queue interrupt once the ring is clean.
 */
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	que->handleq.ev_count++;	/* statistics: times this handler ran */

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reap received packets; "more" means the ring isn't empty yet. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		/* Reclaim completed TX descriptors under the TX lock. */
		more |= ixgbe_txeof(txr);
		/* Multiqueue path: push out anything queued on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/*
			 * Work remains: reschedule this handler instead of
			 * re-enabling the interrupt, so the queue keeps
			 * getting serviced without another IRQ.
			 */
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2977
2978 /************************************************************************
2979 * ixv_handle_que_work
2980 ************************************************************************/
2981 static void
2982 ixv_handle_que_work(struct work *wk, void *context)
2983 {
2984 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2985
2986 /*
2987 * "enqueued flag" is not required here the same as ixg(4).
2988 * See ixgbe_msix_que().
2989 */
2990 ixv_handle_que(que);
2991 }
2992
2993 /************************************************************************
2994 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2995 ************************************************************************/
2996 static int
2997 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2998 {
2999 device_t dev = adapter->dev;
3000 struct ix_queue *que = adapter->queues;
3001 struct tx_ring *txr = adapter->tx_rings;
3002 int error, msix_ctrl, rid, vector = 0;
3003 pci_chipset_tag_t pc;
3004 pcitag_t tag;
3005 char intrbuf[PCI_INTRSTR_LEN];
3006 char wqname[MAXCOMLEN];
3007 char intr_xname[32];
3008 const char *intrstr = NULL;
3009 kcpuset_t *affinity;
3010 int cpu_id = 0;
3011
3012 pc = adapter->osdep.pc;
3013 tag = adapter->osdep.tag;
3014
3015 adapter->osdep.nintrs = adapter->num_queues + 1;
3016 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3017 adapter->osdep.nintrs) != 0) {
3018 aprint_error_dev(dev,
3019 "failed to allocate MSI-X interrupt\n");
3020 return (ENXIO);
3021 }
3022
3023 kcpuset_create(&affinity, false);
3024 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3025 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3026 device_xname(dev), i);
3027 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3028 sizeof(intrbuf));
3029 #ifdef IXGBE_MPSAFE
3030 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3031 true);
3032 #endif
3033 /* Set the handler function */
3034 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3035 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3036 intr_xname);
3037 if (que->res == NULL) {
3038 pci_intr_release(pc, adapter->osdep.intrs,
3039 adapter->osdep.nintrs);
3040 aprint_error_dev(dev,
3041 "Failed to register QUE handler\n");
3042 kcpuset_destroy(affinity);
3043 return (ENXIO);
3044 }
3045 que->msix = vector;
3046 adapter->active_queues |= (u64)(1 << que->msix);
3047
3048 cpu_id = i;
3049 /* Round-robin affinity */
3050 kcpuset_zero(affinity);
3051 kcpuset_set(affinity, cpu_id % ncpu);
3052 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3053 NULL);
3054 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3055 intrstr);
3056 if (error == 0)
3057 aprint_normal(", bound queue %d to cpu %d\n",
3058 i, cpu_id % ncpu);
3059 else
3060 aprint_normal("\n");
3061
3062 #ifndef IXGBE_LEGACY_TX
3063 txr->txr_si
3064 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3065 ixgbe_deferred_mq_start, txr);
3066 #endif
3067 que->que_si
3068 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3069 ixv_handle_que, que);
3070 if (que->que_si == NULL) {
3071 aprint_error_dev(dev,
3072 "could not establish software interrupt\n");
3073 }
3074 }
3075 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3076 error = workqueue_create(&adapter->txr_wq, wqname,
3077 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3078 IXGBE_WORKQUEUE_FLAGS);
3079 if (error) {
3080 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
3081 }
3082 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3083
3084 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3085 error = workqueue_create(&adapter->que_wq, wqname,
3086 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3087 IXGBE_WORKQUEUE_FLAGS);
3088 if (error) {
3089 aprint_error_dev(dev,
3090 "couldn't create workqueue\n");
3091 }
3092
3093 /* and Mailbox */
3094 cpu_id++;
3095 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3096 adapter->vector = vector;
3097 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3098 sizeof(intrbuf));
3099 #ifdef IXGBE_MPSAFE
3100 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3101 true);
3102 #endif
3103 /* Set the mbx handler function */
3104 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3105 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3106 intr_xname);
3107 if (adapter->osdep.ihs[vector] == NULL) {
3108 aprint_error_dev(dev, "Failed to register LINK handler\n");
3109 kcpuset_destroy(affinity);
3110 return (ENXIO);
3111 }
3112 /* Round-robin affinity */
3113 kcpuset_zero(affinity);
3114 kcpuset_set(affinity, cpu_id % ncpu);
3115 error = interrupt_distribute(adapter->osdep.ihs[vector],
3116 affinity, NULL);
3117
3118 aprint_normal_dev(dev,
3119 "for link, interrupting at %s", intrstr);
3120 if (error == 0)
3121 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3122 else
3123 aprint_normal("\n");
3124
3125 /* Tasklets for Mailbox */
3126 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
3127 ixv_handle_link, adapter);
3128 /*
3129 * Due to a broken design QEMU will fail to properly
3130 * enable the guest for MSI-X unless the vectors in
3131 * the table are all set up, so we must rewrite the
3132 * ENABLE in the MSI-X control register again at this
3133 * point to cause it to successfully initialize us.
3134 */
3135 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3136 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3137 rid += PCI_MSIX_CTL;
3138 msix_ctrl = pci_conf_read(pc, tag, rid);
3139 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3140 pci_conf_write(pc, tag, rid, msix_ctrl);
3141 }
3142
3143 kcpuset_destroy(affinity);
3144 return (0);
3145 } /* ixv_allocate_msix */
3146
3147 /************************************************************************
3148 * ixv_configure_interrupts - Setup MSI-X resources
3149 *
3150 * Note: The VF device MUST use MSI-X, there is no fallback.
3151 ************************************************************************/
3152 static int
3153 ixv_configure_interrupts(struct adapter *adapter)
3154 {
3155 device_t dev = adapter->dev;
3156 int want, queues, msgs;
3157
3158 /* Must have at least 2 MSI-X vectors */
3159 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3160 if (msgs < 2) {
3161 aprint_error_dev(dev, "MSIX config error\n");
3162 return (ENXIO);
3163 }
3164 msgs = MIN(msgs, IXG_MAX_NINTR);
3165
3166 /* Figure out a reasonable auto config value */
3167 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3168
3169 if (ixv_num_queues != 0)
3170 queues = ixv_num_queues;
3171 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3172 queues = IXGBE_VF_MAX_TX_QUEUES;
3173
3174 /*
3175 * Want vectors for the queues,
3176 * plus an additional for mailbox.
3177 */
3178 want = queues + 1;
3179 if (msgs >= want)
3180 msgs = want;
3181 else {
3182 aprint_error_dev(dev,
3183 "MSI-X Configuration Problem, "
3184 "%d vectors but %d queues wanted!\n",
3185 msgs, want);
3186 return -1;
3187 }
3188
3189 adapter->msix_mem = (void *)1; /* XXX */
3190 aprint_normal_dev(dev,
3191 "Using MSI-X interrupts with %d vectors\n", msgs);
3192 adapter->num_queues = queues;
3193
3194 return (0);
3195 } /* ixv_configure_interrupts */
3196
3197
3198 /************************************************************************
3199 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3200 *
3201 * Done outside of interrupt context since the driver might sleep
3202 ************************************************************************/
3203 static void
3204 ixv_handle_link(void *context)
3205 {
3206 struct adapter *adapter = context;
3207
3208 IXGBE_CORE_LOCK(adapter);
3209
3210 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3211 &adapter->link_up, FALSE);
3212 ixv_update_link_status(adapter);
3213
3214 IXGBE_CORE_UNLOCK(adapter);
3215 } /* ixv_handle_link */
3216
3217 /************************************************************************
3218 * ixv_check_link - Used in the local timer to poll for link changes
3219 ************************************************************************/
3220 static s32
3221 ixv_check_link(struct adapter *adapter)
3222 {
3223 s32 error;
3224
3225 KASSERT(mutex_owned(&adapter->core_mtx));
3226
3227 adapter->hw.mac.get_link_status = TRUE;
3228
3229 error = adapter->hw.mac.ops.check_link(&adapter->hw,
3230 &adapter->link_speed, &adapter->link_up, FALSE);
3231 ixv_update_link_status(adapter);
3232
3233 return error;
3234 } /* ixv_check_link */
3235