/*$NetBSD: ixv.c,v 1.159 2021/04/30 06:55:32 msaitoh Exp $*/

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.159 2021/04/30 06:55:32 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#include "opt_ixgbe.h"
#endif

#include "ixgbe.h"
#include "vlan.h"

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop_locked(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixv_free_deferred_handlers(struct adapter *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_handle_timer(struct work *, void *);
static int	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_schedule_admin_tasklet(struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static s32	ixv_check_link(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static int	ixv_set_rxfilter(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixv_setup_vlan_tagging(struct adapter *);
static int	ixv_setup_vlan_support(struct adapter *);
static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixv_register_vlan(struct adapter *, u16);
static int	ixv_unregister_vlan(struct adapter *, u16);

static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);
static void	ixv_clear_evcnt(struct adapter *);

/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Event handlers running on workqueue */
static void	ixv_handle_que(void *);

/* Deferred workqueue handlers */
static void	ixv_handle_admin(struct work *, void *);
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

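/*
 * Note: NetBSD has no boot-loader tunables, so TUNABLE_INT() below is
 * defined to nothing; the TUNABLE_INT() lines are kept only to ease
 * diffing against the upstream FreeBSD driver.  The static initializers
 * here are therefore the effective defaults.
 */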
/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation.
 * When enabled, the interrupt rate is varied over time based on the
 * traffic for that interrupt vector.
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

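/*
 * Default interrupt throttle rate, expressed in interrupts/second.
 * The EITR registers hold an interval rather than a rate; rate and
 * register value are related by rate = 4000000 / EITR, so the default
 * below corresponds to an EITR value of IXGBE_LOW_LATENCY (see the
 * interrupt_rate sysctl handler for the conversion).
 */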
static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether deferred packet processing uses a workqueue (true) or a softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring;
 * set higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define	IXGBE_MPSAFE		1
#define	IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define	IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define	IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define	IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define	IXGBE_CALLOUT_FLAGS	0
#define	IXGBE_SOFTINT_FLAGS	0
#define	IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define	IXGBE_TASKLET_WQ_FLAGS	0
#endif
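/*
 * With NET_MPSAFE defined, the callouts, softints and workqueues
 * created with the flags above run MP-safe, i.e. without holding
 * KERNEL_LOCK.
 */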
#define	IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines if the driver should be loaded on
 *   the adapter based on its PCI vendor/device ID.
 *
 *   return 1 on match, 0 otherwise (NetBSD autoconf convention)
 ************************************************************************/
static int
ixv_probe(device_t dev, cfdata_t cf, void *aux)
{
#ifdef __HAVE_PCI_MSI_MSIX
	const struct pci_attach_args *pa = aux;

	return (ixv_lookup(pa) != NULL) ? 1 : 0;
#else
	return 0;
#endif
} /* ixv_probe */

static const ixgbe_vendor_info_t *
ixv_lookup(const struct pci_attach_args *pa)
{
	const ixgbe_vendor_info_t *ent;
	pcireg_t subid;

	INIT_DEBUGOUT("ixv_lookup: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			return ent;
		}
	}

	return NULL;
}

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   (ixv_attach returns void; failures are reported with aprint_error_dev())
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char	*apivstr;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set; on a VM under KVM it may not be,
	 * and that will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop_locked;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;
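	/*
	 * (All VF variants matched above are 82599-class or newer, so the
	 * 82599 scatter-gather segment limit applies to each of them.)
	 */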

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no MAC address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ixv defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixv_free_deferred_handlers(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);

	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_queues(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */

/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: by the stack as the init entry point in the
 *   network interface structure, and by the driver as a hw/sw
 *   initialization routine to get to a consistent state.
 *
 *   (ixv_init_locked returns void; on failure the adapter is stopped)
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue	*que;
	int		error = 0;
	uint32_t	mask;
	int		i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop_locked(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_rxfilter(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
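	/*
	 * (With auto-mask, vectors set in VTEIAM are masked automatically
	 * when their interrupt fires; the handlers re-enable them through
	 * VTEIMS writes, cf. ixv_enable_queue().)
	 */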

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
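/*
 * Queue interrupt enable/disable is reference counted through
 * que->disabled_count: nested ixv_disable_queue() calls only bump the
 * count, and only the ixv_enable_queue() call that drops it back to
 * zero actually unmasks the vector in VTEIMS.
 */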
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	if (que->disabled_count++ > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */

#if 0
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
	 * Currently we use 2us for RSC_DELAY.  The minimum value is always
	 * greater than 2us on 100M (and on 10M, though that case is not
	 * documented), but it's not on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->admin_irqev.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX);
	ixv_schedule_admin_tasklet(adapter);

	return 1;
} /* ixv_msix_mbx */

static void
ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
{

	/*
	 * Only devices newer than the 82598 have VF functions, so this
	 * function can be simple.  IXGBE_EITR_CNT_WDIS keeps the write
	 * from resetting the internal ITR counter.
	 */
	itr |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
}


/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_media_change */

static void
ixv_schedule_admin_tasklet(struct adapter *adapter)
{
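	/*
	 * admin_pending is a 0/1 gate: only the caller that wins the CAS
	 * enqueues the work, so the admin task is never queued twice.
	 * The flag is cleared again once the work has run or the
	 * interface is stopped (cf. the timer_pending handling).
	 */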
	if (adapter->schedule_wqs_ok) {
		if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
			workqueue_enqueue(adapter->admin_wq,
			    &adapter->admin_wc, NULL);
	}
}

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int		mbx_api[] = { ixgbe_mbox_api_13,
				      ixgbe_mbox_api_12,
				      ixgbe_mbox_api_11,
				      ixgbe_mbox_api_10,
				      ixgbe_mbox_api_unknown };
	int		i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */


/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
	u8	mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ifnet	*ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u8	*update_ptr;
	int	mcnt = 0;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	bool	overflow = false;
	int	error, rc = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(adapter->dev,
1141 "the PF may not in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(adapter->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(adapter->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* Promisc might have failed */
		}

		if (rc == 0)
			rc = error;

		/*
		 * Continue on and update the multicast table with as
		 * many addresses as we can.
		 */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(adapter->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	update_ptr = mta;

	error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check; all of that work is
 *   deferred to ixv_handle_timer() via the timer workqueue.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	if (adapter->schedule_wqs_ok) {
		if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
			workqueue_enqueue(adapter->timer_wq,
			    &adapter->timer_wc, NULL);
	}
}

static void
ixv_handle_timer(struct work *wk, void *context)
{
	struct adapter	*adapter = context;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(adapter);

	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning while there
		 * are uncleaned descriptors, it increments busy.  If
		 * we reach the MAX we declare the queue hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_timer */

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t     dev = adapter->dev;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */


/************************************************************************
 * ixv_ifstop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

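	/*
	 * With the core lock released, wait for any admin/timer work
	 * still in flight to finish, then clear the pending gates so
	 * the handlers can be scheduled again by a later init.
	 */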
	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}

static void
ixv_stop_locked(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop_locked: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop_locked */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */

static void
ixv_free_deferred_handlers(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox (link) queue */
	if (adapter->admin_wq != NULL) {
		workqueue_destroy(adapter->admin_wq);
		adapter->admin_wq = NULL;
	}
	if (adapter->timer_wq != NULL) {
		workqueue_destroy(adapter->timer_wq);
		adapter->timer_wq = NULL;
	}
} /* ixv_free_deferred_handlers */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all MSI-X queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}

	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	ifp->if_capenable = 0;
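	/* (Checksum offload and TSO start disabled; enable via ifconfig.) */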

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    |  ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status, &adapter->core_mtx);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */


/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8 for burst writeback (WTHRESH is TXDCTL[22:16]) */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */


/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id;
	int		i, j;
	u32		rss_hash_config;

	/* On NetBSD, always use the stack's RSS key. */
1771 #ifdef __NetBSD__
1772 rss_getkey((uint8_t *) &rss_key);
1773 #else
1774 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1775 /* Fetch the configured RSS key */
1776 rss_getkey((uint8_t *)&rss_key);
1777 } else {
1778 /* set up random bits */
1779 cprng_fast(&rss_key, sizeof(rss_key));
1780 }
1781 #endif
1782
1783 /* Now fill out hash function seeds */
1784 for (i = 0; i < 10; i++)
1785 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1786
1787 /* Set up the redirection table */
1788 for (i = 0, j = 0; i < 64; i++, j++) {
1789 if (j == adapter->num_queues)
1790 j = 0;
1791
1792 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1793 /*
1794 * Fetch the RSS bucket id for the given indirection
1795 * entry. Cap it at the number of configured buckets
			 * (which is num_queues).
1797 */
1798 queue_id = rss_get_indirection_to_bucket(i);
1799 queue_id = queue_id % adapter->num_queues;
1800 } else
1801 queue_id = j;
1802
1803 /*
1804 * The low 8 bits are for hash value (n+0);
1805 * The next 8 bits are for hash value (n+1), etc.
1806 */
1807 reta >>= 8;
1808 reta |= ((uint32_t)queue_id) << 24;
1809 if ((i & 3) == 3) {
1810 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1811 reta = 0;
1812 }
1813 }
1814
1815 /* Perform hash on these packet types */
1816 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1817 rss_hash_config = rss_gethashconfig();
1818 else {
1819 /*
1820 * Disable UDP - IP fragments aren't currently being handled
1821 * and so we end up with a mix of 2-tuple and 4-tuple
1822 * traffic.
1823 */
1824 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1825 | RSS_HASHTYPE_RSS_TCP_IPV4
1826 | RSS_HASHTYPE_RSS_IPV6
1827 | RSS_HASHTYPE_RSS_TCP_IPV6;
1828 }
1829
1830 mrqc = IXGBE_MRQC_RSSEN;
1831 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1832 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1833 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1834 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1835 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1836 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1837 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1838 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1839 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1840 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1841 __func__);
1842 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1843 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1844 __func__);
1845 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1846 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1847 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1848 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1849 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1850 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1851 __func__);
1852 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1853 } /* ixv_initialize_rss_mapping */
1854
1855
1856 /************************************************************************
1857 * ixv_initialize_receive_units - Setup receive registers and features.
1858 ************************************************************************/
1859 static void
1860 ixv_initialize_receive_units(struct adapter *adapter)
1861 {
1862 struct rx_ring *rxr = adapter->rx_rings;
1863 struct ixgbe_hw *hw = &adapter->hw;
1864 struct ifnet *ifp = adapter->ifp;
1865 u32 bufsz, psrtype;
1866
1867 if (ifp->if_mtu > ETHERMTU)
1868 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1869 else
1870 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1871
1872 psrtype = IXGBE_PSRTYPE_TCPHDR
1873 | IXGBE_PSRTYPE_UDPHDR
1874 | IXGBE_PSRTYPE_IPV4HDR
1875 | IXGBE_PSRTYPE_IPV6HDR
1876 | IXGBE_PSRTYPE_L2HDR;
1877
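	/*
	 * With more than one queue, also set bit 29 (the low bit of the
	 * RQPL field), which tells the hardware that RSS queues are in use.
	 */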
1878 if (adapter->num_queues > 1)
1879 psrtype |= 1 << 29;
1880
1881 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1882
1883 /* Tell PF our max_frame size */
1884 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1885 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1886 }
1887
1888 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1889 u64 rdba = rxr->rxdma.dma_paddr;
1890 u32 reg, rxdctl;
1891 int j = rxr->me;
1892
1893 /* Disable the queue */
1894 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1895 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1896 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
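		/* Wait up to 10 ms for the disable to take effect */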
1897 for (int k = 0; k < 10; k++) {
1898 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1899 IXGBE_RXDCTL_ENABLE)
1900 msec_delay(1);
1901 else
1902 break;
1903 }
1904 IXGBE_WRITE_BARRIER(hw);
1905 /* Setup the Base and Length of the Rx Descriptor Ring */
1906 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1907 (rdba & 0x00000000ffffffffULL));
1908 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1909 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1910 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1911
1912 /* Reset the ring indices */
1913 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1914 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1915
1916 /* Set up the SRRCTL register */
1917 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1918 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1919 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1920 reg |= bufsz;
1921 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1922 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1923
1924 /* Capture Rx Tail index */
1925 rxr->tail = IXGBE_VFRDT(rxr->me);
1926
1927 /* Do the queue enabling last */
1928 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1929 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
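		/* Wait up to 10 ms for the enable to take effect */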
1930 for (int k = 0; k < 10; k++) {
1931 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1932 IXGBE_RXDCTL_ENABLE)
1933 break;
1934 msec_delay(1);
1935 }
1936 IXGBE_WRITE_BARRIER(hw);
1937
1938 /* Set the Tail Pointer */
1939 #ifdef DEV_NETMAP
1940 /*
1941 * In netmap mode, we must preserve the buffers made
1942 * available to userspace before the if_init()
1943 * (this is true by default on the TX side, because
1944 * init makes all buffers available to userspace).
1945 *
1946 * netmap_reset() and the device specific routines
1947 * (e.g. ixgbe_setup_receive_rings()) map these
1948 * buffers at the end of the NIC ring, so here we
1949 * must set the RDT (tail) register to make sure
1950 * they are not overwritten.
1951 *
1952 * In this driver the NIC ring starts at RDH = 0,
1953 * RDT points to the last slot available for reception (?),
1954 * so RDT = num_rx_desc - 1 means the whole ring is available.
1955 */
1956 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1957 (ifp->if_capenable & IFCAP_NETMAP)) {
1958 struct netmap_adapter *na = NA(adapter->ifp);
1959 struct netmap_kring *kring = na->rx_rings[i];
1960 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1961
1962 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1963 } else
1964 #endif /* DEV_NETMAP */
1965 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1966 adapter->num_rx_desc - 1);
1967 }
1968
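	/*
	 * Only X550 and newer VFs have the per-VF RSS registers
	 * (VFRSSRK/VFRETA/VFMRQC), so skip RSS setup on older devices.
	 */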
1969 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1970 ixv_initialize_rss_mapping(adapter);
1971 } /* ixv_initialize_receive_units */
1972
1973 /************************************************************************
1974 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1975 *
1976 * Retrieves the TDH value from the hardware
1977 ************************************************************************/
1978 static int
1979 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1980 {
1981 struct sysctlnode node = *rnode;
1982 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1983 uint32_t val;
1984
1985 if (!txr)
1986 return (0);
1987
1988 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1989 node.sysctl_data = &val;
1990 return sysctl_lookup(SYSCTLFN_CALL(&node));
1991 } /* ixv_sysctl_tdh_handler */
1992
1993 /************************************************************************
1994 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1995 *
1996 * Retrieves the TDT value from the hardware
1997 ************************************************************************/
1998 static int
1999 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
2000 {
2001 struct sysctlnode node = *rnode;
2002 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2003 uint32_t val;
2004
2005 if (!txr)
2006 return (0);
2007
2008 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
2009 node.sysctl_data = &val;
2010 return sysctl_lookup(SYSCTLFN_CALL(&node));
2011 } /* ixv_sysctl_tdt_handler */
2012
2013 /************************************************************************
2014 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
2015 * handler function
2016 *
2017 * Retrieves the next_to_check value
2018 ************************************************************************/
2019 static int
2020 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2021 {
2022 struct sysctlnode node = *rnode;
2023 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2024 uint32_t val;
2025
2026 if (!rxr)
2027 return (0);
2028
2029 val = rxr->next_to_check;
2030 node.sysctl_data = &val;
2031 return sysctl_lookup(SYSCTLFN_CALL(&node));
2032 } /* ixv_sysctl_next_to_check_handler */
2033
2034 /************************************************************************
2035 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
2036 *
2037 * Retrieves the RDH value from the hardware
2038 ************************************************************************/
2039 static int
2040 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
2041 {
2042 struct sysctlnode node = *rnode;
2043 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2044 uint32_t val;
2045
2046 if (!rxr)
2047 return (0);
2048
2049 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
2050 node.sysctl_data = &val;
2051 return sysctl_lookup(SYSCTLFN_CALL(&node));
2052 } /* ixv_sysctl_rdh_handler */
2053
2054 /************************************************************************
2055 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
2056 *
2057 * Retrieves the RDT value from the hardware
2058 ************************************************************************/
2059 static int
2060 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
2061 {
2062 struct sysctlnode node = *rnode;
2063 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2064 uint32_t val;
2065
2066 if (!rxr)
2067 return (0);
2068
2069 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2070 node.sysctl_data = &val;
2071 return sysctl_lookup(SYSCTLFN_CALL(&node));
2072 } /* ixv_sysctl_rdt_handler */
2073
2074 static void
2075 ixv_setup_vlan_tagging(struct adapter *adapter)
2076 {
2077 struct ethercom *ec = &adapter->osdep.ec;
2078 struct ixgbe_hw *hw = &adapter->hw;
2079 struct rx_ring *rxr;
2080 u32 ctrl;
2081 int i;
2082 bool hwtagging;
2083
	/* Enable HW tagging only if a VLAN is attached */
2085 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2086 && VLAN_ATTACHED(ec);
2087
2088 /* Enable the queues */
2089 for (i = 0; i < adapter->num_queues; i++) {
2090 rxr = &adapter->rx_rings[i];
2091 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2092 if (hwtagging)
2093 ctrl |= IXGBE_RXDCTL_VME;
2094 else
2095 ctrl &= ~IXGBE_RXDCTL_VME;
2096 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2097 /*
2098 * Let Rx path know that it needs to store VLAN tag
2099 * as part of extra mbuf info.
2100 */
2101 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2102 }
2103 } /* ixv_setup_vlan_tagging */
2104
2105 /************************************************************************
2106 * ixv_setup_vlan_support
2107 ************************************************************************/
2108 static int
2109 ixv_setup_vlan_support(struct adapter *adapter)
2110 {
2111 struct ethercom *ec = &adapter->osdep.ec;
2112 struct ixgbe_hw *hw = &adapter->hw;
2113 u32 vid, vfta, retry;
2114 struct vlanid_list *vlanidp;
2115 int rv, error = 0;
2116
2117 /*
2118 * This function is called from both if_init and ifflags_cb()
2119 * on NetBSD.
2120 */
2121
2122 /*
2123 * Part 1:
2124 * Setup VLAN HW tagging
2125 */
2126 ixv_setup_vlan_tagging(adapter);
2127
2128 if (!VLAN_ATTACHED(ec))
2129 return 0;
2130
2131 /*
2132 * Part 2:
2133 * Setup VLAN HW filter
2134 */
2135 /* Cleanup shadow_vfta */
2136 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2137 adapter->shadow_vfta[i] = 0;
2138 /* Generate shadow_vfta from ec_vids */
2139 ETHER_LOCK(ec);
2140 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2141 uint32_t idx;
2142
2143 idx = vlanidp->vid / 32;
2144 KASSERT(idx < IXGBE_VFTA_SIZE);
2145 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2146 }
2147 ETHER_UNLOCK(ec);
2148
	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
2153 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2154 if (adapter->shadow_vfta[i] == 0)
2155 continue;
2156 vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs from the bits
		 * set in each of the array words.
		 */
2162 for (int j = 0; j < 32; j++) {
2163 retry = 0;
2164 if ((vfta & ((u32)1 << j)) == 0)
2165 continue;
2166 vid = (i * 32) + j;
2167
2168 /* Call the shared code mailbox routine */
2169 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2170 FALSE)) != 0) {
2171 if (++retry > 5) {
2172 device_printf(adapter->dev,
2173 "%s: max retry exceeded\n",
2174 __func__);
2175 break;
2176 }
2177 }
2178 if (rv != 0) {
2179 device_printf(adapter->dev,
2180 "failed to set vlan %d\n", vid);
2181 error = EACCES;
2182 }
2183 }
2184 }
2185 return error;
2186 } /* ixv_setup_vlan_support */
2187
2188 static int
2189 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2190 {
2191 struct ifnet *ifp = &ec->ec_if;
2192 struct adapter *adapter = ifp->if_softc;
2193 int rv;
2194
2195 if (set)
2196 rv = ixv_register_vlan(adapter, vid);
2197 else
2198 rv = ixv_unregister_vlan(adapter, vid);
2199
2200 if (rv != 0)
2201 return rv;
2202
2203 /*
	 * Control VLAN HW tagging when ec_nvlans changes from 1 to 0
	 * or from 0 to 1.
2206 */
2207 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2208 ixv_setup_vlan_tagging(adapter);
2209
2210 return rv;
2211 }
2212
2213 /************************************************************************
2214 * ixv_register_vlan
2215 *
 * Run via a VLAN config EVENT; it enables us to use the
 * HW filter table since we can get the VLAN ID.  This just
 * creates the entry in the soft version of the VFTA; init
 * will repopulate the real table.
2220 ************************************************************************/
2221 static int
2222 ixv_register_vlan(struct adapter *adapter, u16 vtag)
2223 {
2224 struct ixgbe_hw *hw = &adapter->hw;
2225 u16 index, bit;
2226 int error;
2227
2228 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2229 return EINVAL;
2230 IXGBE_CORE_LOCK(adapter);
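	/*
	 * The VFTA is an array of 32-bit words with one bit per VLAN ID:
	 * word index = vtag / 32, bit = vtag % 32.
	 */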
2231 index = (vtag >> 5) & 0x7F;
2232 bit = vtag & 0x1F;
2233 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2234 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2235 IXGBE_CORE_UNLOCK(adapter);
2236
2237 if (error != 0) {
2238 device_printf(adapter->dev, "failed to register vlan %hu\n",
2239 vtag);
2240 error = EACCES;
2241 }
2242 return error;
2243 } /* ixv_register_vlan */
2244
2245 /************************************************************************
2246 * ixv_unregister_vlan
2247 *
 * Run via a VLAN unconfig EVENT; remove our entry
 * from the soft VFTA.
2250 ************************************************************************/
2251 static int
2252 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2253 {
2254 struct ixgbe_hw *hw = &adapter->hw;
2255 u16 index, bit;
2256 int error;
2257
2258 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2259 return EINVAL;
2260
2261 IXGBE_CORE_LOCK(adapter);
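	/* Clear this VLAN's bit: word = vtag / 32, bit = vtag % 32 */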
2262 index = (vtag >> 5) & 0x7F;
2263 bit = vtag & 0x1F;
2264 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2265 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2266 IXGBE_CORE_UNLOCK(adapter);
2267
2268 if (error != 0) {
2269 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2270 vtag);
2271 error = EIO;
2272 }
2273 return error;
2274 } /* ixv_unregister_vlan */
2275
2276 /************************************************************************
2277 * ixv_enable_intr
2278 ************************************************************************/
2279 static void
2280 ixv_enable_intr(struct adapter *adapter)
2281 {
2282 struct ixgbe_hw *hw = &adapter->hw;
2283 struct ix_queue *que = adapter->queues;
2284 u32 mask;
2285 int i;
2286
	/*
	 * For VTEIAC: bits set here make the corresponding EICR bits
	 * auto-clear when the MSI-X vector fires.
	 */
2288 mask = (1 << adapter->vector);
2289 for (i = 0; i < adapter->num_queues; i++, que++)
2290 mask |= (1 << que->msix);
2291 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2292
2293 /* For VTEIMS */
2294 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2295 que = adapter->queues;
2296 for (i = 0; i < adapter->num_queues; i++, que++)
2297 ixv_enable_queue(adapter, que->msix);
2298
2299 IXGBE_WRITE_FLUSH(hw);
2300 } /* ixv_enable_intr */
2301
2302 /************************************************************************
2303 * ixv_disable_intr
2304 ************************************************************************/
2305 static void
2306 ixv_disable_intr(struct adapter *adapter)
2307 {
2308 struct ix_queue *que = adapter->queues;
2309
2310 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2311
2312 /* disable interrupts other than queues */
2313 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2314
2315 for (int i = 0; i < adapter->num_queues; i++, que++)
2316 ixv_disable_queue(adapter, que->msix);
2317
2318 IXGBE_WRITE_FLUSH(&adapter->hw);
2319 } /* ixv_disable_intr */
2320
2321 /************************************************************************
2322 * ixv_set_ivar
2323 *
2324 * Setup the correct IVAR register for a particular MSI-X interrupt
2325 * - entry is the register array entry
2326 * - vector is the MSI-X vector for this queue
2327 * - type is RX/TX/MISC
2328 ************************************************************************/
2329 static void
2330 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2331 {
2332 struct ixgbe_hw *hw = &adapter->hw;
2333 u32 ivar, index;
2334
2335 vector |= IXGBE_IVAR_ALLOC_VAL;
2336
2337 if (type == -1) { /* MISC IVAR */
2338 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2339 ivar &= ~0xFF;
2340 ivar |= vector;
2341 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2342 } else { /* RX/TX IVARS */
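		/*
		 * Each 32-bit VTIVAR register covers two queue entries;
		 * each entry has an 8-bit RX field (offset 0) and an
		 * 8-bit TX field (offset 8).  Odd entries use the
		 * upper 16 bits.
		 */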
2343 index = (16 * (entry & 1)) + (8 * type);
2344 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2345 ivar &= ~(0xffUL << index);
2346 ivar |= ((u32)vector << index);
2347 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2348 }
2349 } /* ixv_set_ivar */
2350
2351 /************************************************************************
2352 * ixv_configure_ivars
2353 ************************************************************************/
2354 static void
2355 ixv_configure_ivars(struct adapter *adapter)
2356 {
2357 struct ix_queue *que = adapter->queues;
2358
2359 /* XXX We should sync EITR value calculation with ixgbe.c? */
2360
2361 for (int i = 0; i < adapter->num_queues; i++, que++) {
2362 /* First the RX queue entry */
2363 ixv_set_ivar(adapter, i, que->msix, 0);
2364 /* ... and the TX */
2365 ixv_set_ivar(adapter, i, que->msix, 1);
2366 /* Set an initial value in EITR */
2367 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2368 }
2369
2370 /* For the mailbox interrupt */
2371 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2372 } /* ixv_configure_ivars */
2373
2374
2375 /************************************************************************
2376 * ixv_save_stats
2377 *
2378 * The VF stats registers never have a truly virgin
2379 * starting point, so this routine tries to make an
 * artificial one, marking ground zero on attach, as
 * it were.
2382 ************************************************************************/
2383 static void
2384 ixv_save_stats(struct adapter *adapter)
2385 {
2386 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2387
2388 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2389 stats->saved_reset_vfgprc +=
2390 stats->vfgprc.ev_count - stats->base_vfgprc;
2391 stats->saved_reset_vfgptc +=
2392 stats->vfgptc.ev_count - stats->base_vfgptc;
2393 stats->saved_reset_vfgorc +=
2394 stats->vfgorc.ev_count - stats->base_vfgorc;
2395 stats->saved_reset_vfgotc +=
2396 stats->vfgotc.ev_count - stats->base_vfgotc;
2397 stats->saved_reset_vfmprc +=
2398 stats->vfmprc.ev_count - stats->base_vfmprc;
2399 }
2400 } /* ixv_save_stats */
2401
2402 /************************************************************************
2403 * ixv_init_stats
2404 ************************************************************************/
2405 static void
2406 ixv_init_stats(struct adapter *adapter)
2407 {
2408 struct ixgbe_hw *hw = &adapter->hw;
2409
2410 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2411 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2412 adapter->stats.vf.last_vfgorc |=
2413 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2414
2415 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2416 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2417 adapter->stats.vf.last_vfgotc |=
2418 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2419
2420 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2421
2422 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2423 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2424 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2425 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2426 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2427 } /* ixv_init_stats */
2428
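/*
 * The VF statistics registers are free-running counters (32-bit, or
 * 36-bit split across an LSB/MSB register pair) that are never reset.
 * The macros below extend them to 64-bit event counters by detecting
 * wrap-around since the last value read.
 */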
2429 #define UPDATE_STAT_32(reg, last, count) \
2430 { \
2431 u32 current = IXGBE_READ_REG(hw, (reg)); \
2432 if (current < (last)) \
2433 count.ev_count += 0x100000000LL; \
2434 (last) = current; \
2435 count.ev_count &= 0xFFFFFFFF00000000LL; \
2436 count.ev_count |= current; \
2437 }
2438
2439 #define UPDATE_STAT_36(lsb, msb, last, count) \
2440 { \
2441 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2442 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2443 u64 current = ((cur_msb << 32) | cur_lsb); \
2444 if (current < (last)) \
2445 count.ev_count += 0x1000000000LL; \
2446 (last) = current; \
2447 count.ev_count &= 0xFFFFFFF000000000LL; \
2448 count.ev_count |= current; \
2449 }
2450
2451 /************************************************************************
2452 * ixv_update_stats - Update the board statistics counters.
2453 ************************************************************************/
2454 void
2455 ixv_update_stats(struct adapter *adapter)
2456 {
2457 struct ixgbe_hw *hw = &adapter->hw;
2458 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2459
2460 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2461 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2462 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2463 stats->vfgorc);
2464 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2465 stats->vfgotc);
2466 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2467
	/* The VF hardware doesn't maintain error counters */
2469
2470 } /* ixv_update_stats */
2471
2472 /************************************************************************
2473 * ixv_sysctl_interrupt_rate_handler
2474 ************************************************************************/
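/*
 * Example (the sysctl path follows the device instance name):
 *	sysctl -w hw.ixv0.q0.interrupt_rate=8000
 */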
2475 static int
2476 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2477 {
2478 struct sysctlnode node = *rnode;
2479 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2480 struct adapter *adapter = que->adapter;
2481 uint32_t reg, usec, rate;
2482 int error;
2483
2484 if (que == NULL)
2485 return 0;
2486 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
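	/*
	 * The interval field lives in EITR bits 11:3, in units of 2us,
	 * so the maximum interrupt rate is 500000 / interval per second.
	 */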
2487 usec = ((reg & 0x0FF8) >> 3);
2488 if (usec > 0)
2489 rate = 500000 / usec;
2490 else
2491 rate = 0;
2492 node.sysctl_data = &rate;
2493 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2494 if (error || newp == NULL)
2495 return error;
2496 reg &= ~0xfff; /* default, no limitation */
2497 if (rate > 0 && rate < 500000) {
2498 if (rate < 1000)
2499 rate = 1000;
2500 reg |= ((4000000 / rate) & 0xff8);
		/*
		 * When RSC is used, the ITR interval must be larger than
		 * RSC_DELAY.  Currently, we use 2us for RSC_DELAY.
		 * The minimum interval is always greater than 2us at 100M
		 * (and presumably 10M, though that's not documented), but
		 * not at 1G and higher.
		 */
2507 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2508 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2509 if ((adapter->num_queues > 1)
2510 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2511 return EINVAL;
2512 }
2513 ixv_max_interrupt_rate = rate;
2514 } else
2515 ixv_max_interrupt_rate = 0;
2516 ixv_eitr_write(adapter, que->msix, reg);
2517
2518 return (0);
2519 } /* ixv_sysctl_interrupt_rate_handler */
2520
2521 const struct sysctlnode *
2522 ixv_sysctl_instance(struct adapter *adapter)
2523 {
2524 const char *dvname;
2525 struct sysctllog **log;
2526 int rc;
2527 const struct sysctlnode *rnode;
2528
2529 log = &adapter->sysctllog;
2530 dvname = device_xname(adapter->dev);
2531
2532 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2533 0, CTLTYPE_NODE, dvname,
2534 SYSCTL_DESCR("ixv information and settings"),
2535 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2536 goto err;
2537
2538 return rnode;
2539 err:
2540 device_printf(adapter->dev,
2541 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2542 return NULL;
2543 }
2544
2545 static void
2546 ixv_add_device_sysctls(struct adapter *adapter)
2547 {
2548 struct sysctllog **log;
2549 const struct sysctlnode *rnode, *cnode;
2550 device_t dev;
2551
2552 dev = adapter->dev;
2553 log = &adapter->sysctllog;
2554
2555 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2556 aprint_error_dev(dev, "could not create sysctl root\n");
2557 return;
2558 }
2559
2560 if (sysctl_createv(log, 0, &rnode, &cnode,
2561 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
2562 SYSCTL_DESCR("Debug Info"),
2563 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2564 aprint_error_dev(dev, "could not create sysctl\n");
2565
2566 if (sysctl_createv(log, 0, &rnode, &cnode,
2567 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
2568 SYSCTL_DESCR("Number of jumbo buffers per queue"),
2569 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
2570 CTL_EOL) != 0)
2571 aprint_error_dev(dev, "could not create sysctl\n");
2572
2573 if (sysctl_createv(log, 0, &rnode, &cnode,
2574 CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
2575 SYSCTL_DESCR("Interrupt Moderation"),
2576 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2577 aprint_error_dev(dev, "could not create sysctl\n");
2578
2579 if (sysctl_createv(log, 0, &rnode, &cnode,
2580 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2581 SYSCTL_DESCR("Use workqueue for packet processing"),
2582 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
2583 != 0)
2584 aprint_error_dev(dev, "could not create sysctl\n");
2585 }
2586
2587 /************************************************************************
2588 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2589 ************************************************************************/
2590 static void
2591 ixv_add_stats_sysctls(struct adapter *adapter)
2592 {
2593 device_t dev = adapter->dev;
2594 struct tx_ring *txr = adapter->tx_rings;
2595 struct rx_ring *rxr = adapter->rx_rings;
2596 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2597 struct ixgbe_hw *hw = &adapter->hw;
2598 const struct sysctlnode *rnode, *cnode;
2599 struct sysctllog **log = &adapter->sysctllog;
2600 const char *xname = device_xname(dev);
2601
2602 /* Driver Statistics */
2603 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2604 NULL, xname, "Driver tx dma soft fail EFBIG");
2605 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2606 NULL, xname, "m_defrag() failed");
2607 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2608 NULL, xname, "Driver tx dma hard fail EFBIG");
2609 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2610 NULL, xname, "Driver tx dma hard fail EINVAL");
2611 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2612 NULL, xname, "Driver tx dma hard fail other");
2613 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2614 NULL, xname, "Driver tx dma soft fail EAGAIN");
2615 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2616 NULL, xname, "Driver tx dma soft fail ENOMEM");
2617 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2618 NULL, xname, "Watchdog timeouts");
2619 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2620 NULL, xname, "TSO errors");
2621 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
2622 NULL, xname, "Admin MSI-X IRQ Handled");
2623 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
2624 NULL, xname, "Admin event");
2625
2626 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2627 snprintf(adapter->queues[i].evnamebuf,
2628 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2629 xname, i);
2630 snprintf(adapter->queues[i].namebuf,
2631 sizeof(adapter->queues[i].namebuf), "q%d", i);
2632
2633 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2634 aprint_error_dev(dev, "could not create sysctl root\n");
2635 break;
2636 }
2637
2638 if (sysctl_createv(log, 0, &rnode, &rnode,
2639 0, CTLTYPE_NODE,
2640 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2641 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2642 break;
2643
2644 if (sysctl_createv(log, 0, &rnode, &cnode,
2645 CTLFLAG_READWRITE, CTLTYPE_INT,
2646 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2647 ixv_sysctl_interrupt_rate_handler, 0,
2648 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2649 break;
2650
2651 if (sysctl_createv(log, 0, &rnode, &cnode,
2652 CTLFLAG_READONLY, CTLTYPE_INT,
2653 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2654 ixv_sysctl_tdh_handler, 0, (void *)txr,
2655 0, CTL_CREATE, CTL_EOL) != 0)
2656 break;
2657
2658 if (sysctl_createv(log, 0, &rnode, &cnode,
2659 CTLFLAG_READONLY, CTLTYPE_INT,
2660 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2661 ixv_sysctl_tdt_handler, 0, (void *)txr,
2662 0, CTL_CREATE, CTL_EOL) != 0)
2663 break;
2664
2665 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2666 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2667 evcnt_attach_dynamic(&adapter->queues[i].handleq,
2668 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
2669 "Handled queue in softint");
2670 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
2671 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
2672 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2673 NULL, adapter->queues[i].evnamebuf, "TSO");
2674 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2675 NULL, adapter->queues[i].evnamebuf,
2676 "TX Queue No Descriptor Available");
2677 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2678 NULL, adapter->queues[i].evnamebuf,
2679 "Queue Packets Transmitted");
2680 #ifndef IXGBE_LEGACY_TX
2681 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2682 NULL, adapter->queues[i].evnamebuf,
2683 "Packets dropped in pcq");
2684 #endif
2685
2686 #ifdef LRO
2687 struct lro_ctrl *lro = &rxr->lro;
2688 #endif /* LRO */
2689
2690 if (sysctl_createv(log, 0, &rnode, &cnode,
2691 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
2692 SYSCTL_DESCR("Receive Descriptor next to check"),
2693 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2694 CTL_CREATE, CTL_EOL) != 0)
2695 break;
2696
2697 if (sysctl_createv(log, 0, &rnode, &cnode,
2698 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
2699 SYSCTL_DESCR("Receive Descriptor Head"),
2700 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2701 CTL_CREATE, CTL_EOL) != 0)
2702 break;
2703
2704 if (sysctl_createv(log, 0, &rnode, &cnode,
2705 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
2706 SYSCTL_DESCR("Receive Descriptor Tail"),
2707 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2708 CTL_CREATE, CTL_EOL) != 0)
2709 break;
2710
2711 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2712 NULL, adapter->queues[i].evnamebuf,
2713 "Queue Packets Received");
2714 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2715 NULL, adapter->queues[i].evnamebuf,
2716 "Queue Bytes Received");
2717 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2718 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2719 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2720 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2721 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2722 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2723 #ifdef LRO
2724 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2725 CTLFLAG_RD, &lro->lro_queued, 0,
2726 "LRO Queued");
2727 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2728 CTLFLAG_RD, &lro->lro_flushed, 0,
2729 "LRO Flushed");
2730 #endif /* LRO */
2731 }
2732
2733 /* MAC stats get their own sub node */
2734
2735 snprintf(stats->namebuf,
2736 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2737
2738 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2739 stats->namebuf, "rx csum offload - IP");
2740 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2741 stats->namebuf, "rx csum offload - L4");
2742 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2743 stats->namebuf, "rx csum offload - IP bad");
2744 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2745 stats->namebuf, "rx csum offload - L4 bad");
2746
2747 /* Packet Reception Stats */
2748 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2749 xname, "Good Packets Received");
2750 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2751 xname, "Good Octets Received");
2752 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2753 xname, "Multicast Packets Received");
2754 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2755 xname, "Good Packets Transmitted");
2756 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2757 xname, "Good Octets Transmitted");
2758
2759 /* Mailbox Stats */
2760 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2761 xname, "message TXs");
2762 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2763 xname, "message RXs");
2764 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2765 xname, "ACKs");
2766 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2767 xname, "REQs");
2768 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2769 xname, "RSTs");
2770
2771 } /* ixv_add_stats_sysctls */
2772
2773 static void
2774 ixv_clear_evcnt(struct adapter *adapter)
2775 {
2776 struct tx_ring *txr = adapter->tx_rings;
2777 struct rx_ring *rxr = adapter->rx_rings;
2778 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2779 struct ixgbe_hw *hw = &adapter->hw;
2780 int i;
2781
2782 /* Driver Statistics */
2783 adapter->efbig_tx_dma_setup.ev_count = 0;
2784 adapter->mbuf_defrag_failed.ev_count = 0;
2785 adapter->efbig2_tx_dma_setup.ev_count = 0;
2786 adapter->einval_tx_dma_setup.ev_count = 0;
2787 adapter->other_tx_dma_setup.ev_count = 0;
2788 adapter->eagain_tx_dma_setup.ev_count = 0;
2789 adapter->enomem_tx_dma_setup.ev_count = 0;
2790 adapter->watchdog_events.ev_count = 0;
2791 adapter->tso_err.ev_count = 0;
2792 adapter->admin_irqev.ev_count = 0;
2793 adapter->link_workev.ev_count = 0;
2794
2795 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2796 adapter->queues[i].irqs.ev_count = 0;
2797 adapter->queues[i].handleq.ev_count = 0;
2798 adapter->queues[i].req.ev_count = 0;
2799 txr->tso_tx.ev_count = 0;
2800 txr->no_desc_avail.ev_count = 0;
2801 txr->total_packets.ev_count = 0;
2802 #ifndef IXGBE_LEGACY_TX
2803 txr->pcq_drops.ev_count = 0;
2804 #endif
2805 txr->q_efbig_tx_dma_setup = 0;
2806 txr->q_mbuf_defrag_failed = 0;
2807 txr->q_efbig2_tx_dma_setup = 0;
2808 txr->q_einval_tx_dma_setup = 0;
2809 txr->q_other_tx_dma_setup = 0;
2810 txr->q_eagain_tx_dma_setup = 0;
2811 txr->q_enomem_tx_dma_setup = 0;
2812 txr->q_tso_err = 0;
2813
2814 rxr->rx_packets.ev_count = 0;
2815 rxr->rx_bytes.ev_count = 0;
2816 rxr->rx_copies.ev_count = 0;
2817 rxr->no_jmbuf.ev_count = 0;
2818 rxr->rx_discarded.ev_count = 0;
2819 }
2820
2821 /* MAC stats get their own sub node */
2822
2823 stats->ipcs.ev_count = 0;
2824 stats->l4cs.ev_count = 0;
2825 stats->ipcs_bad.ev_count = 0;
2826 stats->l4cs_bad.ev_count = 0;
2827
2828 /* Packet Reception Stats */
2829 stats->vfgprc.ev_count = 0;
2830 stats->vfgorc.ev_count = 0;
2831 stats->vfmprc.ev_count = 0;
2832 stats->vfgptc.ev_count = 0;
2833 stats->vfgotc.ev_count = 0;
2834
2835 /* Mailbox Stats */
2836 hw->mbx.stats.msgs_tx.ev_count = 0;
2837 hw->mbx.stats.msgs_rx.ev_count = 0;
2838 hw->mbx.stats.acks.ev_count = 0;
2839 hw->mbx.stats.reqs.ev_count = 0;
2840 hw->mbx.stats.rsts.ev_count = 0;
2841
2842 } /* ixv_clear_evcnt */
2843
2844 /************************************************************************
2845 * ixv_set_sysctl_value
2846 ************************************************************************/
2847 static void
2848 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2849 const char *description, int *limit, int value)
2850 {
2851 device_t dev = adapter->dev;
2852 struct sysctllog **log;
2853 const struct sysctlnode *rnode, *cnode;
2854
2855 log = &adapter->sysctllog;
2856 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2857 aprint_error_dev(dev, "could not create sysctl root\n");
2858 return;
2859 }
2860 if (sysctl_createv(log, 0, &rnode, &cnode,
2861 CTLFLAG_READWRITE, CTLTYPE_INT,
2862 name, SYSCTL_DESCR(description),
2863 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2864 aprint_error_dev(dev, "could not create sysctl\n");
2865 *limit = value;
2866 } /* ixv_set_sysctl_value */
2867
2868 /************************************************************************
2869 * ixv_print_debug_info
2870 *
 * Called only when the "debug" sysctl node is set to 1.
2872 * Provides a way to take a look at important statistics
2873 * maintained by the driver and hardware.
2874 ************************************************************************/
2875 static void
2876 ixv_print_debug_info(struct adapter *adapter)
2877 {
2878 device_t dev = adapter->dev;
2879 struct ix_queue *que = adapter->queues;
2880 struct rx_ring *rxr;
2881 struct tx_ring *txr;
2882 #ifdef LRO
2883 struct lro_ctrl *lro;
2884 #endif /* LRO */
2885
2886 for (int i = 0; i < adapter->num_queues; i++, que++) {
2887 txr = que->txr;
2888 rxr = que->rxr;
2889 #ifdef LRO
2890 lro = &rxr->lro;
2891 #endif /* LRO */
2892 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2893 que->msix, (long)que->irqs.ev_count);
2894 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2895 rxr->me, (long long)rxr->rx_packets.ev_count);
2896 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2897 rxr->me, (long)rxr->rx_bytes.ev_count);
2898 #ifdef LRO
2899 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2900 rxr->me, (uintmax_t)lro->lro_queued);
2901 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2902 rxr->me, (uintmax_t)lro->lro_flushed);
2903 #endif /* LRO */
2904 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2905 txr->me, (long)txr->total_packets.ev_count);
2906 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2907 txr->me, (long)txr->no_desc_avail.ev_count);
2908 }
2909
2910 device_printf(dev, "Admin IRQ Handled: %lu\n",
2911 (long)adapter->admin_irqev.ev_count);
2912 device_printf(dev, "Admin work Handled: %lu\n",
2913 (long)adapter->link_workev.ev_count);
2914 } /* ixv_print_debug_info */
2915
2916 /************************************************************************
2917 * ixv_sysctl_debug
2918 ************************************************************************/
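/*
 * Writing 1 to the node dumps the debug info, e.g.:
 *	sysctl -w hw.ixv0.debug=1
 */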
2919 static int
2920 ixv_sysctl_debug(SYSCTLFN_ARGS)
2921 {
2922 struct sysctlnode node = *rnode;
2923 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2924 int error, result;
2925
2926 node.sysctl_data = &result;
2927 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2928
2929 if (error || newp == NULL)
2930 return error;
2931
2932 if (result == 1)
2933 ixv_print_debug_info(adapter);
2934
2935 return 0;
2936 } /* ixv_sysctl_debug */
2937
2938 /************************************************************************
2939 * ixv_init_device_features
2940 ************************************************************************/
2941 static void
2942 ixv_init_device_features(struct adapter *adapter)
2943 {
2944 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2945 | IXGBE_FEATURE_VF
2946 | IXGBE_FEATURE_RSS
2947 | IXGBE_FEATURE_LEGACY_TX;
2948
	/* A tad short on feature flags for VFs, at the moment. */
2950 switch (adapter->hw.mac.type) {
2951 case ixgbe_mac_82599_vf:
2952 break;
2953 case ixgbe_mac_X540_vf:
2954 break;
2955 case ixgbe_mac_X550_vf:
2956 case ixgbe_mac_X550EM_x_vf:
2957 case ixgbe_mac_X550EM_a_vf:
2958 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2959 break;
2960 default:
2961 break;
2962 }
2963
2964 /* Enabled by default... */
2965 /* Is a virtual function (VF) */
2966 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2967 adapter->feat_en |= IXGBE_FEATURE_VF;
2968 /* Netmap */
2969 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2970 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2971 /* Receive-Side Scaling (RSS) */
2972 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2973 adapter->feat_en |= IXGBE_FEATURE_RSS;
2974 /* Needs advanced context descriptor regardless of offloads req'd */
2975 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2976 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2977
2978 /* Enabled via sysctl... */
2979 /* Legacy (single queue) transmit */
2980 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2981 ixv_enable_legacy_tx)
2982 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2983 } /* ixv_init_device_features */
2984
2985 /************************************************************************
2986 * ixv_shutdown - Shutdown entry point
2987 ************************************************************************/
2988 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2989 static int
2990 ixv_shutdown(device_t dev)
2991 {
2992 struct adapter *adapter = device_private(dev);
2993 IXGBE_CORE_LOCK(adapter);
2994 ixv_stop_locked(adapter);
2995 IXGBE_CORE_UNLOCK(adapter);
2996
2997 return (0);
2998 } /* ixv_shutdown */
2999 #endif
3000
3001 static int
3002 ixv_ifflags_cb(struct ethercom *ec)
3003 {
3004 struct ifnet *ifp = &ec->ec_if;
3005 struct adapter *adapter = ifp->if_softc;
3006 u_short saved_flags;
3007 u_short change;
3008 int rv = 0;
3009
3010 IXGBE_CORE_LOCK(adapter);
3011
3012 saved_flags = adapter->if_flags;
3013 change = ifp->if_flags ^ adapter->if_flags;
3014 if (change != 0)
3015 adapter->if_flags = ifp->if_flags;
3016
3017 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3018 rv = ENETRESET;
3019 goto out;
3020 } else if ((change & IFF_PROMISC) != 0) {
3021 rv = ixv_set_rxfilter(adapter);
3022 if (rv != 0) {
3023 /* Restore previous */
3024 adapter->if_flags = saved_flags;
3025 goto out;
3026 }
3027 }
3028
3029 /* Check for ec_capenable. */
3030 change = ec->ec_capenable ^ adapter->ec_capenable;
3031 adapter->ec_capenable = ec->ec_capenable;
3032 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
3033 | ETHERCAP_VLAN_HWFILTER)) != 0) {
3034 rv = ENETRESET;
3035 goto out;
3036 }
3037
3038 /*
3039 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * The PF's MAXFRS(MHADD) does not include the 4 bytes of the
	 * VLAN header.
3041 */
3042
3043 /* Set up VLAN support and filter */
3044 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
3045 rv = ixv_setup_vlan_support(adapter);
3046
3047 out:
3048 IXGBE_CORE_UNLOCK(adapter);
3049
3050 return rv;
3051 }
3052
3053
3054 /************************************************************************
3055 * ixv_ioctl - Ioctl entry point
3056 *
3057 * Called when the user wants to configure the interface.
3058 *
3059 * return 0 on success, positive on failure
3060 ************************************************************************/
3061 static int
3062 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
3063 {
3064 struct adapter *adapter = ifp->if_softc;
3065 struct ixgbe_hw *hw = &adapter->hw;
3066 struct ifcapreq *ifcr = data;
3067 int error;
3068 int l4csum_en;
3069 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3070 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3071
3072 switch (command) {
3073 case SIOCSIFFLAGS:
3074 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
3075 break;
3076 case SIOCADDMULTI: {
3077 struct ether_multi *enm;
3078 struct ether_multistep step;
3079 struct ethercom *ec = &adapter->osdep.ec;
3080 bool overflow = false;
3081 int mcnt = 0;
3082
		/*
		 * Check the number of multicast addresses.  If the limit
		 * is exceeded, try to switch to allmulti mode; failing
		 * that, return ENOSPC.
		 * Update this code when we support API 1.3.
		 */
3088 ETHER_LOCK(ec);
3089 ETHER_FIRST_MULTI(step, ec, enm);
3090 while (enm != NULL) {
3091 mcnt++;
3092
			/*
			 * This check runs before the new address is
			 * added, so at least one free slot must remain.
			 */
3097 if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3098 overflow = true;
3099 break;
3100 }
3101 ETHER_NEXT_MULTI(step, enm);
3102 }
3103 ETHER_UNLOCK(ec);
3104 error = 0;
3105 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3106 error = hw->mac.ops.update_xcast_mode(hw,
3107 IXGBEVF_XCAST_MODE_ALLMULTI);
3108 if (error == IXGBE_ERR_NOT_TRUSTED) {
3109 device_printf(adapter->dev,
3110 "this interface is not trusted\n");
3111 error = EPERM;
3112 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3113 device_printf(adapter->dev,
3114 "the PF doesn't support allmulti mode\n");
3115 error = EOPNOTSUPP;
3116 } else if (error) {
3117 device_printf(adapter->dev,
3118 "number of Ethernet multicast addresses "
3119 "exceeds the limit (%d). error = %d\n",
3120 IXGBE_MAX_VF_MC, error);
3121 error = ENOSPC;
3122 } else
3123 ec->ec_flags |= ETHER_F_ALLMULTI;
3124 }
3125 if (error)
3126 return error;
3127 }
3128 /*FALLTHROUGH*/
3129 case SIOCDELMULTI:
3130 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3131 break;
3132 case SIOCSIFMEDIA:
3133 case SIOCGIFMEDIA:
3134 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3135 break;
3136 case SIOCSIFCAP:
3137 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3138 break;
3139 case SIOCSIFMTU:
3140 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3141 break;
3142 case SIOCZIFDATA:
3143 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3144 ixv_update_stats(adapter);
3145 ixv_clear_evcnt(adapter);
3146 break;
3147 default:
3148 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3149 break;
3150 }
3151
3152 switch (command) {
3153 case SIOCSIFCAP:
3154 /* Layer-4 Rx checksum offload has to be turned on and
3155 * off as a unit.
3156 */
3157 l4csum_en = ifcr->ifcr_capenable & l4csum;
3158 if (l4csum_en != l4csum && l4csum_en != 0)
3159 return EINVAL;
3160 /*FALLTHROUGH*/
3161 case SIOCADDMULTI:
3162 case SIOCDELMULTI:
3163 case SIOCSIFFLAGS:
3164 case SIOCSIFMTU:
3165 default:
3166 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3167 return error;
3168 if ((ifp->if_flags & IFF_RUNNING) == 0)
3169 ;
3170 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3171 IXGBE_CORE_LOCK(adapter);
3172 ixv_init_locked(adapter);
3173 IXGBE_CORE_UNLOCK(adapter);
3174 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3175 /*
3176 * Multicast list has changed; set the hardware filter
3177 * accordingly.
3178 */
3179 IXGBE_CORE_LOCK(adapter);
3180 ixv_disable_intr(adapter);
3181 ixv_set_rxfilter(adapter);
3182 ixv_enable_intr(adapter);
3183 IXGBE_CORE_UNLOCK(adapter);
3184 }
3185 return 0;
3186 }
3187 } /* ixv_ioctl */
3188
3189 /************************************************************************
3190 * ixv_init
3191 ************************************************************************/
3192 static int
3193 ixv_init(struct ifnet *ifp)
3194 {
3195 struct adapter *adapter = ifp->if_softc;
3196
3197 IXGBE_CORE_LOCK(adapter);
3198 ixv_init_locked(adapter);
3199 IXGBE_CORE_UNLOCK(adapter);
3200
3201 return 0;
3202 } /* ixv_init */
3203
3204 /************************************************************************
3205 * ixv_handle_que
3206 ************************************************************************/
3207 static void
3208 ixv_handle_que(void *context)
3209 {
3210 struct ix_queue *que = context;
3211 struct adapter *adapter = que->adapter;
3212 struct tx_ring *txr = que->txr;
3213 struct ifnet *ifp = adapter->ifp;
3214 bool more;
3215
3216 que->handleq.ev_count++;
3217
3218 if (ifp->if_flags & IFF_RUNNING) {
3219 more = ixgbe_rxeof(que);
3220 IXGBE_TX_LOCK(txr);
3221 more |= ixgbe_txeof(txr);
3222 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
3223 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3224 ixgbe_mq_start_locked(ifp, txr);
3225 /* Only for queue 0 */
3226 /* NetBSD still needs this for CBQ */
3227 if ((&adapter->queues[0] == que)
3228 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3229 ixgbe_legacy_start_locked(ifp, txr);
3230 IXGBE_TX_UNLOCK(txr);
3231 if (more) {
3232 que->req.ev_count++;
3233 if (adapter->txrx_use_workqueue) {
				/*
				 * As in ixg(4), an "enqueued" flag is not
				 * required here.  See ixgbe_msix_que().
				 */
3238 workqueue_enqueue(adapter->que_wq,
3239 &que->wq_cookie, curcpu());
3240 } else
3241 softint_schedule(que->que_si);
3242 return;
3243 }
3244 }
3245
3246 /* Re-enable this interrupt */
3247 ixv_enable_queue(adapter, que->msix);
3248
3249 return;
3250 } /* ixv_handle_que */
3251
3252 /************************************************************************
3253 * ixv_handle_que_work
3254 ************************************************************************/
3255 static void
3256 ixv_handle_que_work(struct work *wk, void *context)
3257 {
3258 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3259
	/*
	 * As in ixg(4), an "enqueued" flag is not required here.
	 * See ixgbe_msix_que().
	 */
3264 ixv_handle_que(que);
3265 }
3266
3267 /************************************************************************
3268 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3269 ************************************************************************/
3270 static int
3271 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
3272 {
3273 device_t dev = adapter->dev;
3274 struct ix_queue *que = adapter->queues;
3275 struct tx_ring *txr = adapter->tx_rings;
3276 int error, msix_ctrl, rid, vector = 0;
3277 pci_chipset_tag_t pc;
3278 pcitag_t tag;
3279 char intrbuf[PCI_INTRSTR_LEN];
3280 char wqname[MAXCOMLEN];
3281 char intr_xname[32];
3282 const char *intrstr = NULL;
3283 kcpuset_t *affinity;
3284 int cpu_id = 0;
3285
3286 pc = adapter->osdep.pc;
3287 tag = adapter->osdep.tag;
3288
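	/* One MSI-X vector per queue, plus one for the mailbox */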
3289 adapter->osdep.nintrs = adapter->num_queues + 1;
3290 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3291 adapter->osdep.nintrs) != 0) {
3292 aprint_error_dev(dev,
3293 "failed to allocate MSI-X interrupt\n");
3294 return (ENXIO);
3295 }
3296
3297 kcpuset_create(&affinity, false);
3298 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3299 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3300 device_xname(dev), i);
3301 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3302 sizeof(intrbuf));
3303 #ifdef IXGBE_MPSAFE
3304 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3305 true);
3306 #endif
3307 /* Set the handler function */
3308 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3309 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3310 intr_xname);
3311 if (que->res == NULL) {
3312 pci_intr_release(pc, adapter->osdep.intrs,
3313 adapter->osdep.nintrs);
3314 aprint_error_dev(dev,
3315 "Failed to register QUE handler\n");
3316 kcpuset_destroy(affinity);
3317 return (ENXIO);
3318 }
3319 que->msix = vector;
3320 adapter->active_queues |= (u64)(1 << que->msix);
3321
3322 cpu_id = i;
3323 /* Round-robin affinity */
3324 kcpuset_zero(affinity);
3325 kcpuset_set(affinity, cpu_id % ncpu);
3326 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3327 NULL);
3328 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3329 intrstr);
3330 if (error == 0)
3331 aprint_normal(", bound queue %d to cpu %d\n",
3332 i, cpu_id % ncpu);
3333 else
3334 aprint_normal("\n");
3335
3336 #ifndef IXGBE_LEGACY_TX
3337 txr->txr_si
3338 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3339 ixgbe_deferred_mq_start, txr);
3340 #endif
3341 que->que_si
3342 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3343 ixv_handle_que, que);
3344 if (que->que_si == NULL) {
3345 aprint_error_dev(dev,
3346 "could not establish software interrupt\n");
3347 }
3348 }
3349 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3350 error = workqueue_create(&adapter->txr_wq, wqname,
3351 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3352 IXGBE_WORKQUEUE_FLAGS);
3353 if (error) {
3354 aprint_error_dev(dev,
3355 "couldn't create workqueue for deferred Tx\n");
3356 }
3357 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3358
3359 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3360 error = workqueue_create(&adapter->que_wq, wqname,
3361 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3362 IXGBE_WORKQUEUE_FLAGS);
3363 if (error) {
3364 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
3365 }
3366
3367 /* and Mailbox */
3368 cpu_id++;
3369 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3370 adapter->vector = vector;
3371 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3372 sizeof(intrbuf));
3373 #ifdef IXGBE_MPSAFE
3374 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3375 true);
3376 #endif
3377 /* Set the mbx handler function */
3378 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3379 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3380 intr_xname);
3381 if (adapter->osdep.ihs[vector] == NULL) {
3382 aprint_error_dev(dev, "Failed to register LINK handler\n");
3383 kcpuset_destroy(affinity);
3384 return (ENXIO);
3385 }
3386 /* Round-robin affinity */
3387 kcpuset_zero(affinity);
3388 kcpuset_set(affinity, cpu_id % ncpu);
3389 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
3390 NULL);
3391
3392 aprint_normal_dev(dev,
3393 "for link, interrupting at %s", intrstr);
3394 if (error == 0)
3395 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3396 else
3397 aprint_normal("\n");
3398
3399 /* Tasklets for Mailbox */
3400 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
3401 error = workqueue_create(&adapter->admin_wq, wqname,
3402 ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3403 IXGBE_TASKLET_WQ_FLAGS);
3404 if (error) {
3405 aprint_error_dev(dev,
3406 "could not create admin workqueue (%d)\n", error);
3407 goto err_out;
3408 }
3409
	/*
	 * Due to a broken design, QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE bit in the MSI-X control register again at
	 * this point to cause it to successfully initialize us.
	 */
3417 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3418 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3419 rid += PCI_MSIX_CTL;
3420 msix_ctrl = pci_conf_read(pc, tag, rid);
3421 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3422 pci_conf_write(pc, tag, rid, msix_ctrl);
3423 }
3424
3425 kcpuset_destroy(affinity);
3426 return (0);
3427 err_out:
3428 kcpuset_destroy(affinity);
3429 ixv_free_deferred_handlers(adapter);
3430 ixv_free_pci_resources(adapter);
3431 return (error);
3432 } /* ixv_allocate_msix */
3433
3434 /************************************************************************
3435 * ixv_configure_interrupts - Setup MSI-X resources
3436 *
3437 * Note: The VF device MUST use MSI-X, there is no fallback.
3438 ************************************************************************/
3439 static int
3440 ixv_configure_interrupts(struct adapter *adapter)
3441 {
3442 device_t dev = adapter->dev;
3443 int want, queues, msgs;
3444
3445 /* Must have at least 2 MSI-X vectors */
3446 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3447 if (msgs < 2) {
3448 aprint_error_dev(dev, "MSIX config error\n");
3449 return (ENXIO);
3450 }
3451 msgs = MIN(msgs, IXG_MAX_NINTR);
3452
3453 /* Figure out a reasonable auto config value */
3454 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3455
3456 if (ixv_num_queues != 0)
3457 queues = ixv_num_queues;
3458 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3459 queues = IXGBE_VF_MAX_TX_QUEUES;
3460
	/*
	 * We want one vector per queue,
	 * plus an additional one for the mailbox.
	 */
3465 want = queues + 1;
3466 if (msgs >= want)
3467 msgs = want;
3468 else {
		aprint_error_dev(dev,
		    "MSI-X Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    msgs, want);
3473 return -1;
3474 }
3475
3476 adapter->msix_mem = (void *)1; /* XXX */
3477 aprint_normal_dev(dev,
3478 "Using MSI-X interrupts with %d vectors\n", msgs);
3479 adapter->num_queues = queues;
3480
3481 return (0);
3482 } /* ixv_configure_interrupts */
3483
3484
3485 /************************************************************************
3486 * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
3487 *
3488 * Done outside of interrupt context since the driver might sleep
3489 ************************************************************************/
3490 static void
3491 ixv_handle_admin(struct work *wk, void *context)
3492 {
3493 struct adapter *adapter = context;
3494 struct ixgbe_hw *hw = &adapter->hw;
3495
3496 IXGBE_CORE_LOCK(adapter);
3497
3498 ++adapter->link_workev.ev_count;
3499 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3500 &adapter->link_up, FALSE);
3501 ixv_update_link_status(adapter);
3502
3503 adapter->task_requests = 0;
3504 atomic_store_relaxed(&adapter->admin_pending, 0);
3505
3506 /* Re-enable interrupts */
3507 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
3508
3509 IXGBE_CORE_UNLOCK(adapter);
3510 } /* ixv_handle_admin */
3511
3512 /************************************************************************
3513 * ixv_check_link - Used in the local timer to poll for link changes
3514 ************************************************************************/
3515 static s32
3516 ixv_check_link(struct adapter *adapter)
3517 {
3518 s32 error;
3519
3520 KASSERT(mutex_owned(&adapter->core_mtx));
3521
3522 adapter->hw.mac.get_link_status = TRUE;
3523
3524 error = adapter->hw.mac.ops.check_link(&adapter->hw,
3525 &adapter->link_speed, &adapter->link_up, FALSE);
3526 ixv_update_link_status(adapter);
3527
3528 return error;
3529 } /* ixv_check_link */
3530