/* $NetBSD: ixv.c,v 1.162 2021/06/16 00:21:18 riastradh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.162 2021/06/16 00:21:18 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#include "opt_ixgbe.h"
#endif

#include "ixgbe.h"
#include "vlan.h"

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select the devices to attach to
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop_locked(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixv_free_deferred_handlers(struct adapter *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_handle_timer(struct work *, void *);
static int	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_schedule_admin_tasklet(struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static s32	ixv_check_link(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static int	ixv_set_rxfilter(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixv_setup_vlan_tagging(struct adapter *);
static int	ixv_setup_vlan_support(struct adapter *);
static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixv_register_vlan(struct adapter *, u16);
static int	ixv_unregister_vlan(struct adapter *, u16);

static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);
static void	ixv_clear_evcnt(struct adapter *);

/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Event handlers running on workqueue */
static void	ixv_handle_que(void *);

/* Deferred workqueue handlers */
static void	ixv_handle_admin(struct work *, void *);
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
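/*
 * NetBSD has no FreeBSD-style loader tunables, so TUNABLE_INT() is
 * stubbed out to a no-op below; the initializers here serve as the
 * compile-time defaults for the TUNABLE_INT() declarations that follow.
 */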
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 *
 * The interrupt rate is varied over time based on the
 * traffic seen on each interrupt vector.
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/*
 * Whether deferred packet processing runs in a workqueue (true)
 * or a softint (false).
 */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring; set higher than RX
 * as this seems to be the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif

/************************************************************************
 * ixv_probe - Device identification routine
 *
 *   Determines if the driver should be attached to
 *   the adapter based on its PCI vendor/device ID.
 *
 *   return 1 on match, 0 otherwise
 ************************************************************************/
static int
ixv_probe(device_t dev, cfdata_t cf, void *aux)
{
#ifdef __HAVE_PCI_MSI_MSIX
	const struct pci_attach_args *pa = aux;

	return (ixv_lookup(pa) != NULL) ? 1 : 0;
#else
	return 0;
#endif
} /* ixv_probe */

static const ixgbe_vendor_info_t *
ixv_lookup(const struct pci_attach_args *pa)
{
	const ixgbe_vendor_info_t *ent;
	pcireg_t subid;

	INIT_DEBUGOUT("ixv_lookup: begin");

	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
		return NULL;

	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
			(ent->subvendor_id == 0)) &&
		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
			(ent->subdevice_id == 0))) {
			return ent;
		}
	}

	return NULL;
}

/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure, all allocated resources are freed and
 *   adapter->osdep.attached is left false.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char wqname[MAXCOMLEN];
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop_locked;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout and workqueue */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&adapter->timer_wq, wqname,
	    ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_TASKLET_WQ_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
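		/*
		 * Clear the multicast bit and set the locally
		 * administered bit so the random address is a
		 * valid unicast LAA.
		 */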
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_queues(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface; its callouts are stopped as part of this. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixv_free_deferred_handlers(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);

	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_queues(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */

/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in the network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   No return value; on failure the adapter is stopped.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	int		error = 0;
	uint32_t	mask;
	int		i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop_locked(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_rxfilter(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	error = ixgbe_setup_receive_structures(adapter);
	if (error) {
		device_printf(dev,
		    "Could not setup receive structures (err = %d)\n", error);
		ixv_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Initialize the variable that tracks task enqueue requests from interrupts */
	adapter->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

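	/*
	 * disabled_count allows nested disable/enable pairs; the
	 * hardware interrupt is only unmasked once the count drops
	 * back to zero.
	 */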
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32		queue = 1UL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	if (que->disabled_count++ > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */

#if 0
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes / txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);
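	/*
	 * Worked example: an average frame of 1024 bytes gives
	 * newitr = 1024 + 24 = 1048, which falls in the mid range
	 * above and becomes 1048 / 3 = 349.
	 */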

	/*
	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
	 * Currently we use 2us for RSC_DELAY. The minimum interval is always
	 * greater than 2us at 100M (and presumably 10M, though that is not
	 * documented), but not at 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->admin_irqev.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX);
	ixv_schedule_admin_tasklet(adapter);

	return 1;
} /* ixv_msix_mbx */

static void
ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
{

	/*
	 * Every device with a VF function is newer than the 82598,
	 * so this function is simple.
	 */
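	/*
	 * CNT_WDIS (counter write disable) should keep this write from
	 * also resetting the internal ITR countdown; see the EITR
	 * register description in the datasheet.
	 */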
	itr |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
}


/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
} /* ixv_media_status */

/************************************************************************
 * ixv_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_media_change */

static void
ixv_schedule_admin_tasklet(struct adapter *adapter)
{
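	/*
	 * admin_pending makes the 0 -> 1 transition at most once between
	 * handler runs, so a single work item is never enqueued twice.
	 */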
	if (adapter->schedule_wqs_ok) {
		if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
			workqueue_enqueue(adapter->admin_wq,
			    &adapter->admin_wc, NULL);
	}
}

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int		mbx_api[] = { ixgbe_mbox_api_13,
				      ixgbe_mbox_api_12,
				      ixgbe_mbox_api_11,
				      ixgbe_mbox_api_10,
				      ixgbe_mbox_api_unknown };
	int		i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */


/************************************************************************
 * ixv_set_rxfilter - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
	u8	mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ifnet		*ifp = adapter->ifp;
	struct ixgbe_hw		*hw = &adapter->hw;
	u8			*update_ptr;
	int			mcnt = 0;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	bool			overflow = false;
	int			error, rc = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(adapter->dev,
			    "the PF may not be in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(adapter->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
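		/*
		 * A range of addresses (enm_addrlo != enm_addrhi) cannot
		 * be expressed in the VF's exact-match filter, so treat
		 * it as an overflow and fall back to ALLMULTI below.
		 */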
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(adapter->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* Promisc might have failed */
		}

		if (rc == 0)
			rc = error;

		/* Continue to update the multicast table as many as we can */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(adapter->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	update_ptr = mta;

	error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	if (adapter->schedule_wqs_ok) {
		if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
			workqueue_enqueue(adapter->timer_wq,
			    &adapter->timer_wc, NULL);
	}
}

static void
ixv_handle_timer(struct work *wk, void *context)
{
	struct adapter	*adapter = context;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(adapter);

	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning while there
		 * are uncleaned descriptors, it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_timer */

/************************************************************************
 * ixv_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t     dev = adapter->dev;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s\n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */


/************************************************************************
 * ixv_ifstop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
	atomic_store_relaxed(&adapter->admin_pending, 0);
	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
	atomic_store_relaxed(&adapter->timer_pending, 0);
}

static void
ixv_stop_locked(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop_locked: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop_locked */


/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */

static void
ixv_free_deferred_handlers(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	if (adapter->admin_wq != NULL) {
		workqueue_destroy(adapter->admin_wq);
		adapter->admin_wq = NULL;
	}
	if (adapter->timer_wq != NULL) {
		workqueue_destroy(adapter->timer_wq);
		adapter->timer_wq = NULL;
	}
} /* ixv_free_deferred_handlers */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all MSI-X queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}

	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	if_initialize(ifp);
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    |  ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status, &adapter->core_mtx);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */


/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
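		/* (WTHRESH sits at bits 16 and up in TXDCTL, hence the shift.) */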
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */


/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id;
	int		i, j;
	u32		rss_hash_config;

	/* On NetBSD, always use the RSS key configured in the network stack. */
1768 #ifdef __NetBSD__
1769 rss_getkey((uint8_t *) &rss_key);
1770 #else
1771 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1772 /* Fetch the configured RSS key */
1773 rss_getkey((uint8_t *)&rss_key);
1774 } else {
1775 /* set up random bits */
1776 cprng_fast(&rss_key, sizeof(rss_key));
1777 }
1778 #endif
1779
1780 /* Now fill out hash function seeds */
1781 for (i = 0; i < 10; i++)
1782 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
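	/*
	 * The loop above programs the full 40-byte Toeplitz hash key
	 * as ten 32-bit register writes.
	 */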
1783
1784 /* Set up the redirection table */
1785 for (i = 0, j = 0; i < 64; i++, j++) {
1786 if (j == adapter->num_queues)
1787 j = 0;
1788
1789 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1790 /*
1791 * Fetch the RSS bucket id for the given indirection
1792 * entry. Cap it at the number of configured buckets
1793 * (which is num_queues.)
1794 */
1795 queue_id = rss_get_indirection_to_bucket(i);
1796 queue_id = queue_id % adapter->num_queues;
1797 } else
1798 queue_id = j;
1799
1800 /*
1801 		 * Each 32-bit RETA register holds four 8-bit entries:
1802 		 * entry (n+0) in the low byte, (n+1) in the next, etc.
1803 */
1804 reta >>= 8;
1805 reta |= ((uint32_t)queue_id) << 24;
1806 if ((i & 3) == 3) {
1807 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1808 reta = 0;
1809 }
1810 }
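	/*
	 * Example (non-RSS fallback, num_queues == 2): entries cycle
	 * 0,1,0,1,..., so the first four iterations write VFRETA(0) as
	 * 0x01000100; entry i lands in byte i % 4 of its register.
	 */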
1811
1812 /* Perform hash on these packet types */
1813 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1814 rss_hash_config = rss_gethashconfig();
1815 else {
1816 /*
1817 * Disable UDP - IP fragments aren't currently being handled
1818 * and so we end up with a mix of 2-tuple and 4-tuple
1819 * traffic.
1820 */
1821 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1822 | RSS_HASHTYPE_RSS_TCP_IPV4
1823 | RSS_HASHTYPE_RSS_IPV6
1824 | RSS_HASHTYPE_RSS_TCP_IPV6;
1825 }
1826
1827 mrqc = IXGBE_MRQC_RSSEN;
1828 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1829 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1830 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1831 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1832 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1833 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1834 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1835 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1836 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1837 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1838 __func__);
1839 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1840 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1841 __func__);
1842 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1843 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1844 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1845 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1846 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1847 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1848 __func__);
1849 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1850 } /* ixv_initialize_rss_mapping */
1851
1852
1853 /************************************************************************
1854 * ixv_initialize_receive_units - Setup receive registers and features.
1855 ************************************************************************/
1856 static void
1857 ixv_initialize_receive_units(struct adapter *adapter)
1858 {
1859 struct rx_ring *rxr = adapter->rx_rings;
1860 struct ixgbe_hw *hw = &adapter->hw;
1861 struct ifnet *ifp = adapter->ifp;
1862 u32 bufsz, psrtype;
1863
1864 if (ifp->if_mtu > ETHERMTU)
1865 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1866 else
1867 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
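	/*
	 * SRRCTL.BSIZEPKT is expressed in 1 KB units
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10), so this selects 4 KB
	 * buffers for jumbo MTUs and 2 KB buffers otherwise.
	 */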
1868
1869 psrtype = IXGBE_PSRTYPE_TCPHDR
1870 | IXGBE_PSRTYPE_UDPHDR
1871 | IXGBE_PSRTYPE_IPV4HDR
1872 | IXGBE_PSRTYPE_IPV6HDR
1873 | IXGBE_PSRTYPE_L2HDR;
1874
1875 if (adapter->num_queues > 1)
1876 		psrtype |= 1 << 29;	/* RQPL: advertise 2 RSS queues (2^1) */
1877
1878 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1879
1880 /* Tell PF our max_frame size */
1881 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1882 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1883 }
1884
1885 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1886 u64 rdba = rxr->rxdma.dma_paddr;
1887 u32 reg, rxdctl;
1888 int j = rxr->me;
1889
1890 /* Disable the queue */
1891 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1892 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1893 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1894 for (int k = 0; k < 10; k++) {
1895 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1896 IXGBE_RXDCTL_ENABLE)
1897 msec_delay(1);
1898 else
1899 break;
1900 }
1901 IXGBE_WRITE_BARRIER(hw);
1902 /* Setup the Base and Length of the Rx Descriptor Ring */
1903 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1904 (rdba & 0x00000000ffffffffULL));
1905 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1906 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1907 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1908
1909 /* Reset the ring indices */
1910 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1911 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1912
1913 /* Set up the SRRCTL register */
1914 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1915 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1916 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1917 reg |= bufsz;
1918 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1919 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1920
1921 /* Capture Rx Tail index */
1922 rxr->tail = IXGBE_VFRDT(rxr->me);
1923
1924 /* Do the queue enabling last */
1925 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1926 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1927 for (int k = 0; k < 10; k++) {
1928 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1929 IXGBE_RXDCTL_ENABLE)
1930 break;
1931 msec_delay(1);
1932 }
1933 IXGBE_WRITE_BARRIER(hw);
1934
1935 /* Set the Tail Pointer */
1936 #ifdef DEV_NETMAP
1937 /*
1938 * In netmap mode, we must preserve the buffers made
1939 * available to userspace before the if_init()
1940 * (this is true by default on the TX side, because
1941 * init makes all buffers available to userspace).
1942 *
1943 * netmap_reset() and the device specific routines
1944 * (e.g. ixgbe_setup_receive_rings()) map these
1945 * buffers at the end of the NIC ring, so here we
1946 * must set the RDT (tail) register to make sure
1947 * they are not overwritten.
1948 *
1949 * In this driver the NIC ring starts at RDH = 0,
1950 * RDT points to the last slot available for reception (?),
1951 * so RDT = num_rx_desc - 1 means the whole ring is available.
1952 */
1953 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1954 (ifp->if_capenable & IFCAP_NETMAP)) {
1955 struct netmap_adapter *na = NA(adapter->ifp);
1956 struct netmap_kring *kring = na->rx_rings[i];
1957 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1958
1959 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1960 } else
1961 #endif /* DEV_NETMAP */
1962 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1963 adapter->num_rx_desc - 1);
1964 }
1965
1966 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1967 ixv_initialize_rss_mapping(adapter);
1968 } /* ixv_initialize_receive_units */
1969
1970 /************************************************************************
1971 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1972 *
1973 * Retrieves the TDH value from the hardware
1974 ************************************************************************/
1975 static int
1976 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1977 {
1978 struct sysctlnode node = *rnode;
1979 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1980 uint32_t val;
1981
1982 if (!txr)
1983 return (0);
1984
1985 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1986 node.sysctl_data = &val;
1987 return sysctl_lookup(SYSCTLFN_CALL(&node));
1988 } /* ixv_sysctl_tdh_handler */
1989
1990 /************************************************************************
1991  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1992 *
1993 * Retrieves the TDT value from the hardware
1994 ************************************************************************/
1995 static int
1996 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1997 {
1998 struct sysctlnode node = *rnode;
1999 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2000 uint32_t val;
2001
2002 if (!txr)
2003 return (0);
2004
2005 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
2006 node.sysctl_data = &val;
2007 return sysctl_lookup(SYSCTLFN_CALL(&node));
2008 } /* ixv_sysctl_tdt_handler */
2009
2010 /************************************************************************
2011 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
2012 * handler function
2013 *
2014 * Retrieves the next_to_check value
2015 ************************************************************************/
2016 static int
2017 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2018 {
2019 struct sysctlnode node = *rnode;
2020 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2021 uint32_t val;
2022
2023 if (!rxr)
2024 return (0);
2025
2026 val = rxr->next_to_check;
2027 node.sysctl_data = &val;
2028 return sysctl_lookup(SYSCTLFN_CALL(&node));
2029 } /* ixv_sysctl_next_to_check_handler */
2030
2031 /************************************************************************
2032 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
2033 *
2034 * Retrieves the RDH value from the hardware
2035 ************************************************************************/
2036 static int
2037 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
2038 {
2039 struct sysctlnode node = *rnode;
2040 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2041 uint32_t val;
2042
2043 if (!rxr)
2044 return (0);
2045
2046 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
2047 node.sysctl_data = &val;
2048 return sysctl_lookup(SYSCTLFN_CALL(&node));
2049 } /* ixv_sysctl_rdh_handler */
2050
2051 /************************************************************************
2052 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
2053 *
2054 * Retrieves the RDT value from the hardware
2055 ************************************************************************/
2056 static int
2057 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
2058 {
2059 struct sysctlnode node = *rnode;
2060 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2061 uint32_t val;
2062
2063 if (!rxr)
2064 return (0);
2065
2066 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2067 node.sysctl_data = &val;
2068 return sysctl_lookup(SYSCTLFN_CALL(&node));
2069 } /* ixv_sysctl_rdt_handler */
2070
2071 static void
2072 ixv_setup_vlan_tagging(struct adapter *adapter)
2073 {
2074 struct ethercom *ec = &adapter->osdep.ec;
2075 struct ixgbe_hw *hw = &adapter->hw;
2076 struct rx_ring *rxr;
2077 u32 ctrl;
2078 int i;
2079 bool hwtagging;
2080
2081 /* Enable HW tagging only if any vlan is attached */
2082 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2083 && VLAN_ATTACHED(ec);
2084
2085 	/* Set the VLAN stripping mode (VME) on each Rx queue */
2086 for (i = 0; i < adapter->num_queues; i++) {
2087 rxr = &adapter->rx_rings[i];
2088 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2089 if (hwtagging)
2090 ctrl |= IXGBE_RXDCTL_VME;
2091 else
2092 ctrl &= ~IXGBE_RXDCTL_VME;
2093 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2094 /*
2095 * Let Rx path know that it needs to store VLAN tag
2096 * as part of extra mbuf info.
2097 */
2098 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2099 }
2100 } /* ixv_setup_vlan_tagging */
2101
2102 /************************************************************************
2103 * ixv_setup_vlan_support
2104 ************************************************************************/
2105 static int
2106 ixv_setup_vlan_support(struct adapter *adapter)
2107 {
2108 struct ethercom *ec = &adapter->osdep.ec;
2109 struct ixgbe_hw *hw = &adapter->hw;
2110 u32 vid, vfta, retry;
2111 struct vlanid_list *vlanidp;
2112 int rv, error = 0;
2113
2114 /*
2115 * This function is called from both if_init and ifflags_cb()
2116 * on NetBSD.
2117 */
2118
2119 /*
2120 * Part 1:
2121 * Setup VLAN HW tagging
2122 */
2123 ixv_setup_vlan_tagging(adapter);
2124
2125 if (!VLAN_ATTACHED(ec))
2126 return 0;
2127
2128 /*
2129 * Part 2:
2130 * Setup VLAN HW filter
2131 */
2132 /* Cleanup shadow_vfta */
2133 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2134 adapter->shadow_vfta[i] = 0;
2135 /* Generate shadow_vfta from ec_vids */
2136 ETHER_LOCK(ec);
2137 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2138 uint32_t idx;
2139
2140 idx = vlanidp->vid / 32;
2141 KASSERT(idx < IXGBE_VFTA_SIZE);
2142 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2143 }
2144 ETHER_UNLOCK(ec);
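	/*
	 * Example: VLAN ID 100 sets bit 4 (100 % 32) of shadow_vfta[3]
	 * (100 / 32), mirroring the bit layout of the hardware VFTA.
	 */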
2145
2146 /*
2147 	 * A soft reset zeroes out the VFTA, so
2148 * we need to repopulate it now.
2149 */
2150 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2151 if (adapter->shadow_vfta[i] == 0)
2152 continue;
2153 vfta = adapter->shadow_vfta[i];
2154 /*
2155 		 * Reconstruct the VLAN IDs based
2156 		 * on the bits set in each of the
2157 		 * 32-bit array entries.
2158 */
2159 for (int j = 0; j < 32; j++) {
2160 retry = 0;
2161 if ((vfta & ((u32)1 << j)) == 0)
2162 continue;
2163 vid = (i * 32) + j;
2164
2165 /* Call the shared code mailbox routine */
2166 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2167 FALSE)) != 0) {
2168 if (++retry > 5) {
2169 device_printf(adapter->dev,
2170 "%s: max retry exceeded\n",
2171 __func__);
2172 break;
2173 }
2174 }
2175 if (rv != 0) {
2176 device_printf(adapter->dev,
2177 "failed to set vlan %d\n", vid);
2178 error = EACCES;
2179 }
2180 }
2181 }
2182 return error;
2183 } /* ixv_setup_vlan_support */
2184
2185 static int
2186 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2187 {
2188 struct ifnet *ifp = &ec->ec_if;
2189 struct adapter *adapter = ifp->if_softc;
2190 int rv;
2191
2192 if (set)
2193 rv = ixv_register_vlan(adapter, vid);
2194 else
2195 rv = ixv_unregister_vlan(adapter, vid);
2196
2197 if (rv != 0)
2198 return rv;
2199
2200 /*
2201 	 * Reconfigure VLAN HW tagging when ec_nvlans changes from 1 to 0
2202 	 * or from 0 to 1.
2203 */
2204 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2205 ixv_setup_vlan_tagging(adapter);
2206
2207 return rv;
2208 }
2209
2210 /************************************************************************
2211 * ixv_register_vlan
2212 *
2213  * Run via a vlan config EVENT, this enables us to use the
2214  * HW filter table since we can get the VLAN ID. This just
2215  * creates the entry in the soft version of the VFTA; init
2216 * will repopulate the real table.
2217 ************************************************************************/
2218 static int
2219 ixv_register_vlan(struct adapter *adapter, u16 vtag)
2220 {
2221 struct ixgbe_hw *hw = &adapter->hw;
2222 u16 index, bit;
2223 int error;
2224
2225 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2226 return EINVAL;
2227 IXGBE_CORE_LOCK(adapter);
2228 index = (vtag >> 5) & 0x7F;
2229 bit = vtag & 0x1F;
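	/*
	 * (vtag >> 5) == vtag / 32 and (vtag & 0x1F) == vtag % 32, so
	 * this picks the same word/bit as ixv_setup_vlan_support().
	 */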
2230 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2231 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2232 IXGBE_CORE_UNLOCK(adapter);
2233
2234 if (error != 0) {
2235 device_printf(adapter->dev, "failed to register vlan %hu\n",
2236 vtag);
2237 error = EACCES;
2238 }
2239 return error;
2240 } /* ixv_register_vlan */
2241
2242 /************************************************************************
2243 * ixv_unregister_vlan
2244 *
2245  * Run via a vlan unconfig EVENT, removes our entry
2246  * from the soft VFTA.
2247 ************************************************************************/
2248 static int
2249 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2250 {
2251 struct ixgbe_hw *hw = &adapter->hw;
2252 u16 index, bit;
2253 int error;
2254
2255 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2256 return EINVAL;
2257
2258 IXGBE_CORE_LOCK(adapter);
2259 index = (vtag >> 5) & 0x7F;
2260 bit = vtag & 0x1F;
2261 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2262 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2263 IXGBE_CORE_UNLOCK(adapter);
2264
2265 if (error != 0) {
2266 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2267 vtag);
2268 error = EIO;
2269 }
2270 return error;
2271 } /* ixv_unregister_vlan */
2272
2273 /************************************************************************
2274 * ixv_enable_intr
2275 ************************************************************************/
2276 static void
2277 ixv_enable_intr(struct adapter *adapter)
2278 {
2279 struct ixgbe_hw *hw = &adapter->hw;
2280 struct ix_queue *que = adapter->queues;
2281 u32 mask;
2282 int i;
2283
2284 /* For VTEIAC */
2285 mask = (1 << adapter->vector);
2286 for (i = 0; i < adapter->num_queues; i++, que++)
2287 mask |= (1 << que->msix);
2288 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
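	/*
	 * VTEIAC only selects which vectors auto-clear in EICR when the
	 * MSI-X message fires; VTEIMS and ixv_enable_queue() below do
	 * the actual unmasking.
	 */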
2289
2290 /* For VTEIMS */
2291 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2292 que = adapter->queues;
2293 for (i = 0; i < adapter->num_queues; i++, que++)
2294 ixv_enable_queue(adapter, que->msix);
2295
2296 IXGBE_WRITE_FLUSH(hw);
2297 } /* ixv_enable_intr */
2298
2299 /************************************************************************
2300 * ixv_disable_intr
2301 ************************************************************************/
2302 static void
2303 ixv_disable_intr(struct adapter *adapter)
2304 {
2305 struct ix_queue *que = adapter->queues;
2306
2307 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2308
2309 /* disable interrupts other than queues */
2310 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, (1 << adapter->vector));
2311
2312 for (int i = 0; i < adapter->num_queues; i++, que++)
2313 ixv_disable_queue(adapter, que->msix);
2314
2315 IXGBE_WRITE_FLUSH(&adapter->hw);
2316 } /* ixv_disable_intr */
2317
2318 /************************************************************************
2319 * ixv_set_ivar
2320 *
2321 * Setup the correct IVAR register for a particular MSI-X interrupt
2322 * - entry is the register array entry
2323 * - vector is the MSI-X vector for this queue
2324 * - type is RX/TX/MISC
2325 ************************************************************************/
2326 static void
2327 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2328 {
2329 struct ixgbe_hw *hw = &adapter->hw;
2330 u32 ivar, index;
2331
2332 vector |= IXGBE_IVAR_ALLOC_VAL;
2333
2334 if (type == -1) { /* MISC IVAR */
2335 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2336 ivar &= ~0xFF;
2337 ivar |= vector;
2338 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2339 } else { /* RX/TX IVARS */
2340 index = (16 * (entry & 1)) + (8 * type);
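		/*
		 * Each VTIVAR register carries four 8-bit entries: Rx of
		 * queue 2n at bits 7:0, Tx of queue 2n at 15:8, Rx of
		 * queue 2n+1 at 23:16 and Tx of queue 2n+1 at 31:24;
		 * the index arithmetic above selects among them.
		 */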
2341 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2342 ivar &= ~(0xffUL << index);
2343 ivar |= ((u32)vector << index);
2344 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2345 }
2346 } /* ixv_set_ivar */
2347
2348 /************************************************************************
2349 * ixv_configure_ivars
2350 ************************************************************************/
2351 static void
2352 ixv_configure_ivars(struct adapter *adapter)
2353 {
2354 struct ix_queue *que = adapter->queues;
2355
2356 /* XXX We should sync EITR value calculation with ixgbe.c? */
2357
2358 for (int i = 0; i < adapter->num_queues; i++, que++) {
2359 /* First the RX queue entry */
2360 ixv_set_ivar(adapter, i, que->msix, 0);
2361 /* ... and the TX */
2362 ixv_set_ivar(adapter, i, que->msix, 1);
2363 /* Set an initial value in EITR */
2364 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2365 }
2366
2367 /* For the mailbox interrupt */
2368 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2369 } /* ixv_configure_ivars */
2370
2371
2372 /************************************************************************
2373 * ixv_save_stats
2374 *
2375 * The VF stats registers never have a truly virgin
2376  * starting point, so this routine creates an artificial
2377  * one, recording a baseline ("ground zero") at attach
2378  * time.
2379 ************************************************************************/
2380 static void
2381 ixv_save_stats(struct adapter *adapter)
2382 {
2383 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2384
2385 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2386 stats->saved_reset_vfgprc +=
2387 stats->vfgprc.ev_count - stats->base_vfgprc;
2388 stats->saved_reset_vfgptc +=
2389 stats->vfgptc.ev_count - stats->base_vfgptc;
2390 stats->saved_reset_vfgorc +=
2391 stats->vfgorc.ev_count - stats->base_vfgorc;
2392 stats->saved_reset_vfgotc +=
2393 stats->vfgotc.ev_count - stats->base_vfgotc;
2394 stats->saved_reset_vfmprc +=
2395 stats->vfmprc.ev_count - stats->base_vfmprc;
2396 }
2397 } /* ixv_save_stats */
2398
2399 /************************************************************************
2400 * ixv_init_stats
2401 ************************************************************************/
2402 static void
2403 ixv_init_stats(struct adapter *adapter)
2404 {
2405 struct ixgbe_hw *hw = &adapter->hw;
2406
2407 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2408 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2409 adapter->stats.vf.last_vfgorc |=
2410 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2411
2412 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2413 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2414 adapter->stats.vf.last_vfgotc |=
2415 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2416
2417 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2418
2419 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2420 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2421 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2422 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2423 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2424 } /* ixv_init_stats */
2425
2426 #define UPDATE_STAT_32(reg, last, count) \
2427 { \
2428 u32 current = IXGBE_READ_REG(hw, (reg)); \
2429 if (current < (last)) \
2430 count.ev_count += 0x100000000LL; \
2431 (last) = current; \
2432 count.ev_count &= 0xFFFFFFFF00000000LL; \
2433 count.ev_count |= current; \
2434 }
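/*
 * Example: if a 32-bit counter register wrapped from 0xFFFFFFF0 to
 * 0x00000010 between two updates, "current < last" detects the
 * rollover, 2^32 is added to the event count, and its low 32 bits
 * are then replaced with the new reading.
 */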
2435
2436 #define UPDATE_STAT_36(lsb, msb, last, count) \
2437 { \
2438 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2439 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2440 u64 current = ((cur_msb << 32) | cur_lsb); \
2441 if (current < (last)) \
2442 count.ev_count += 0x1000000000LL; \
2443 (last) = current; \
2444 count.ev_count &= 0xFFFFFFF000000000LL; \
2445 count.ev_count |= current; \
2446 }
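/*
 * Same scheme for the 36-bit octet counters: the value is read as an
 * LSB/MSB register pair and the rollover step is 2^36.
 */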
2447
2448 /************************************************************************
2449 * ixv_update_stats - Update the board statistics counters.
2450 ************************************************************************/
2451 void
2452 ixv_update_stats(struct adapter *adapter)
2453 {
2454 struct ixgbe_hw *hw = &adapter->hw;
2455 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2456
2457 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2458 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2459 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2460 stats->vfgorc);
2461 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2462 stats->vfgotc);
2463 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2464
2465 	/* The VF hardware doesn't maintain error counters */
2466
2467 } /* ixv_update_stats */
2468
2469 /************************************************************************
2470 * ixv_sysctl_interrupt_rate_handler
2471 ************************************************************************/
2472 static int
2473 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2474 {
2475 struct sysctlnode node = *rnode;
2476 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2477 	struct adapter *adapter;
2478 	uint32_t reg, usec, rate;
2479 	int error;
2480 
2481 	if (que == NULL)
2482 		return 0;
2483 	adapter = que->adapter;
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_VTEITR(que->msix));
2484 usec = ((reg & 0x0FF8) >> 3);
2485 if (usec > 0)
2486 rate = 500000 / usec;
2487 else
2488 rate = 0;
2489 node.sysctl_data = &rate;
2490 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2491 if (error || newp == NULL)
2492 return error;
2493 reg &= ~0xfff; /* default, no limitation */
2494 if (rate > 0 && rate < 500000) {
2495 if (rate < 1000)
2496 rate = 1000;
2497 reg |= ((4000000 / rate) & 0xff8);
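		/*
		 * Both the decode above (usec = (reg & 0x0FF8) >> 3,
		 * rate = 500000 / usec) and this encoding treat the EITR
		 * interval field (bits 11:3) as 2 us units: e.g. a rate
		 * of 10000 encodes as 4000000 / 10000 = 0x190, i.e.
		 * 50 units == 100 us between interrupts.
		 */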
2498 /*
2499 * When RSC is used, ITR interval must be larger than
2500 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2501 		 * The minimum interval always exceeds 2us on 100M (and
2502 		 * probably 10M, though undocumented), but not on 1G and higher.
2503 */
2504 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2505 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2506 if ((adapter->num_queues > 1)
2507 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2508 return EINVAL;
2509 }
2510 ixv_max_interrupt_rate = rate;
2511 } else
2512 ixv_max_interrupt_rate = 0;
2513 ixv_eitr_write(adapter, que->msix, reg);
2514
2515 return (0);
2516 } /* ixv_sysctl_interrupt_rate_handler */
2517
2518 const struct sysctlnode *
2519 ixv_sysctl_instance(struct adapter *adapter)
2520 {
2521 const char *dvname;
2522 struct sysctllog **log;
2523 int rc;
2524 const struct sysctlnode *rnode;
2525
2526 log = &adapter->sysctllog;
2527 dvname = device_xname(adapter->dev);
2528
2529 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2530 0, CTLTYPE_NODE, dvname,
2531 SYSCTL_DESCR("ixv information and settings"),
2532 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2533 goto err;
2534
2535 return rnode;
2536 err:
2537 device_printf(adapter->dev,
2538 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2539 return NULL;
2540 }
2541
2542 static void
2543 ixv_add_device_sysctls(struct adapter *adapter)
2544 {
2545 struct sysctllog **log;
2546 const struct sysctlnode *rnode, *cnode;
2547 device_t dev;
2548
2549 dev = adapter->dev;
2550 log = &adapter->sysctllog;
2551
2552 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2553 aprint_error_dev(dev, "could not create sysctl root\n");
2554 return;
2555 }
2556
2557 if (sysctl_createv(log, 0, &rnode, &cnode,
2558 CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
2559 SYSCTL_DESCR("Debug Info"),
2560 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2561 aprint_error_dev(dev, "could not create sysctl\n");
2562
2563 if (sysctl_createv(log, 0, &rnode, &cnode,
2564 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
2565 SYSCTL_DESCR("Number of jumbo buffers per queue"),
2566 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
2567 CTL_EOL) != 0)
2568 aprint_error_dev(dev, "could not create sysctl\n");
2569
2570 if (sysctl_createv(log, 0, &rnode, &cnode,
2571 CTLFLAG_READWRITE, CTLTYPE_BOOL, "enable_aim",
2572 SYSCTL_DESCR("Interrupt Moderation"),
2573 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2574 aprint_error_dev(dev, "could not create sysctl\n");
2575
2576 if (sysctl_createv(log, 0, &rnode, &cnode,
2577 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
2578 SYSCTL_DESCR("Use workqueue for packet processing"),
2579 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL)
2580 != 0)
2581 aprint_error_dev(dev, "could not create sysctl\n");
2582 }
2583
2584 /************************************************************************
2585 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2586 ************************************************************************/
2587 static void
2588 ixv_add_stats_sysctls(struct adapter *adapter)
2589 {
2590 device_t dev = adapter->dev;
2591 struct tx_ring *txr = adapter->tx_rings;
2592 struct rx_ring *rxr = adapter->rx_rings;
2593 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2594 struct ixgbe_hw *hw = &adapter->hw;
2595 const struct sysctlnode *rnode, *cnode;
2596 struct sysctllog **log = &adapter->sysctllog;
2597 const char *xname = device_xname(dev);
2598
2599 /* Driver Statistics */
2600 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2601 NULL, xname, "Driver tx dma soft fail EFBIG");
2602 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2603 NULL, xname, "m_defrag() failed");
2604 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2605 NULL, xname, "Driver tx dma hard fail EFBIG");
2606 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2607 NULL, xname, "Driver tx dma hard fail EINVAL");
2608 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2609 NULL, xname, "Driver tx dma hard fail other");
2610 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2611 NULL, xname, "Driver tx dma soft fail EAGAIN");
2612 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2613 NULL, xname, "Driver tx dma soft fail ENOMEM");
2614 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2615 NULL, xname, "Watchdog timeouts");
2616 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2617 NULL, xname, "TSO errors");
2618 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
2619 NULL, xname, "Admin MSI-X IRQ Handled");
2620 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
2621 NULL, xname, "Admin event");
2622
2623 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2624 snprintf(adapter->queues[i].evnamebuf,
2625 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2626 xname, i);
2627 snprintf(adapter->queues[i].namebuf,
2628 sizeof(adapter->queues[i].namebuf), "q%d", i);
2629
2630 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2631 aprint_error_dev(dev, "could not create sysctl root\n");
2632 break;
2633 }
2634
2635 if (sysctl_createv(log, 0, &rnode, &rnode,
2636 0, CTLTYPE_NODE,
2637 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2638 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2639 break;
2640
2641 if (sysctl_createv(log, 0, &rnode, &cnode,
2642 CTLFLAG_READWRITE, CTLTYPE_INT,
2643 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2644 ixv_sysctl_interrupt_rate_handler, 0,
2645 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2646 break;
2647
2648 if (sysctl_createv(log, 0, &rnode, &cnode,
2649 CTLFLAG_READONLY, CTLTYPE_INT,
2650 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2651 ixv_sysctl_tdh_handler, 0, (void *)txr,
2652 0, CTL_CREATE, CTL_EOL) != 0)
2653 break;
2654
2655 if (sysctl_createv(log, 0, &rnode, &cnode,
2656 CTLFLAG_READONLY, CTLTYPE_INT,
2657 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2658 ixv_sysctl_tdt_handler, 0, (void *)txr,
2659 0, CTL_CREATE, CTL_EOL) != 0)
2660 break;
2661
2662 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2663 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2664 evcnt_attach_dynamic(&adapter->queues[i].handleq,
2665 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
2666 "Handled queue in softint");
2667 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
2668 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
2669 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2670 NULL, adapter->queues[i].evnamebuf, "TSO");
2671 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2672 NULL, adapter->queues[i].evnamebuf,
2673 "TX Queue No Descriptor Available");
2674 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2675 NULL, adapter->queues[i].evnamebuf,
2676 "Queue Packets Transmitted");
2677 #ifndef IXGBE_LEGACY_TX
2678 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2679 NULL, adapter->queues[i].evnamebuf,
2680 "Packets dropped in pcq");
2681 #endif
2682
2683 #ifdef LRO
2684 struct lro_ctrl *lro = &rxr->lro;
2685 #endif /* LRO */
2686
2687 if (sysctl_createv(log, 0, &rnode, &cnode,
2688 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
2689 SYSCTL_DESCR("Receive Descriptor next to check"),
2690 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2691 CTL_CREATE, CTL_EOL) != 0)
2692 break;
2693
2694 if (sysctl_createv(log, 0, &rnode, &cnode,
2695 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
2696 SYSCTL_DESCR("Receive Descriptor Head"),
2697 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2698 CTL_CREATE, CTL_EOL) != 0)
2699 break;
2700
2701 if (sysctl_createv(log, 0, &rnode, &cnode,
2702 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
2703 SYSCTL_DESCR("Receive Descriptor Tail"),
2704 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2705 CTL_CREATE, CTL_EOL) != 0)
2706 break;
2707
2708 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2709 NULL, adapter->queues[i].evnamebuf,
2710 "Queue Packets Received");
2711 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2712 NULL, adapter->queues[i].evnamebuf,
2713 "Queue Bytes Received");
2714 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2715 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2716 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2717 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2718 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2719 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2720 #ifdef LRO
2721 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2722 CTLFLAG_RD, &lro->lro_queued, 0,
2723 "LRO Queued");
2724 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2725 CTLFLAG_RD, &lro->lro_flushed, 0,
2726 "LRO Flushed");
2727 #endif /* LRO */
2728 }
2729
2730 /* MAC stats get their own sub node */
2731
2732 snprintf(stats->namebuf,
2733 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2734
2735 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2736 stats->namebuf, "rx csum offload - IP");
2737 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2738 stats->namebuf, "rx csum offload - L4");
2739 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2740 stats->namebuf, "rx csum offload - IP bad");
2741 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2742 stats->namebuf, "rx csum offload - L4 bad");
2743
2744 /* Packet Reception Stats */
2745 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2746 xname, "Good Packets Received");
2747 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2748 xname, "Good Octets Received");
2749 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2750 xname, "Multicast Packets Received");
2751 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2752 xname, "Good Packets Transmitted");
2753 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2754 xname, "Good Octets Transmitted");
2755
2756 /* Mailbox Stats */
2757 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2758 xname, "message TXs");
2759 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2760 xname, "message RXs");
2761 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2762 xname, "ACKs");
2763 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2764 xname, "REQs");
2765 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2766 xname, "RSTs");
2767
2768 } /* ixv_add_stats_sysctls */
2769
2770 static void
2771 ixv_clear_evcnt(struct adapter *adapter)
2772 {
2773 struct tx_ring *txr = adapter->tx_rings;
2774 struct rx_ring *rxr = adapter->rx_rings;
2775 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2776 struct ixgbe_hw *hw = &adapter->hw;
2777 int i;
2778
2779 /* Driver Statistics */
2780 adapter->efbig_tx_dma_setup.ev_count = 0;
2781 adapter->mbuf_defrag_failed.ev_count = 0;
2782 adapter->efbig2_tx_dma_setup.ev_count = 0;
2783 adapter->einval_tx_dma_setup.ev_count = 0;
2784 adapter->other_tx_dma_setup.ev_count = 0;
2785 adapter->eagain_tx_dma_setup.ev_count = 0;
2786 adapter->enomem_tx_dma_setup.ev_count = 0;
2787 adapter->watchdog_events.ev_count = 0;
2788 adapter->tso_err.ev_count = 0;
2789 adapter->admin_irqev.ev_count = 0;
2790 adapter->link_workev.ev_count = 0;
2791
2792 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2793 adapter->queues[i].irqs.ev_count = 0;
2794 adapter->queues[i].handleq.ev_count = 0;
2795 adapter->queues[i].req.ev_count = 0;
2796 txr->tso_tx.ev_count = 0;
2797 txr->no_desc_avail.ev_count = 0;
2798 txr->total_packets.ev_count = 0;
2799 #ifndef IXGBE_LEGACY_TX
2800 txr->pcq_drops.ev_count = 0;
2801 #endif
2802 txr->q_efbig_tx_dma_setup = 0;
2803 txr->q_mbuf_defrag_failed = 0;
2804 txr->q_efbig2_tx_dma_setup = 0;
2805 txr->q_einval_tx_dma_setup = 0;
2806 txr->q_other_tx_dma_setup = 0;
2807 txr->q_eagain_tx_dma_setup = 0;
2808 txr->q_enomem_tx_dma_setup = 0;
2809 txr->q_tso_err = 0;
2810
2811 rxr->rx_packets.ev_count = 0;
2812 rxr->rx_bytes.ev_count = 0;
2813 rxr->rx_copies.ev_count = 0;
2814 rxr->no_jmbuf.ev_count = 0;
2815 rxr->rx_discarded.ev_count = 0;
2816 }
2817
2818 	/* MAC stats */
2819
2820 stats->ipcs.ev_count = 0;
2821 stats->l4cs.ev_count = 0;
2822 stats->ipcs_bad.ev_count = 0;
2823 stats->l4cs_bad.ev_count = 0;
2824
2825 /* Packet Reception Stats */
2826 stats->vfgprc.ev_count = 0;
2827 stats->vfgorc.ev_count = 0;
2828 stats->vfmprc.ev_count = 0;
2829 stats->vfgptc.ev_count = 0;
2830 stats->vfgotc.ev_count = 0;
2831
2832 /* Mailbox Stats */
2833 hw->mbx.stats.msgs_tx.ev_count = 0;
2834 hw->mbx.stats.msgs_rx.ev_count = 0;
2835 hw->mbx.stats.acks.ev_count = 0;
2836 hw->mbx.stats.reqs.ev_count = 0;
2837 hw->mbx.stats.rsts.ev_count = 0;
2838
2839 } /* ixv_clear_evcnt */
2840
2841 /************************************************************************
2842 * ixv_set_sysctl_value
2843 ************************************************************************/
2844 static void
2845 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2846 const char *description, int *limit, int value)
2847 {
2848 device_t dev = adapter->dev;
2849 struct sysctllog **log;
2850 const struct sysctlnode *rnode, *cnode;
2851
2852 log = &adapter->sysctllog;
2853 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2854 aprint_error_dev(dev, "could not create sysctl root\n");
2855 return;
2856 }
2857 if (sysctl_createv(log, 0, &rnode, &cnode,
2858 CTLFLAG_READWRITE, CTLTYPE_INT,
2859 name, SYSCTL_DESCR(description),
2860 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2861 aprint_error_dev(dev, "could not create sysctl\n");
2862 *limit = value;
2863 } /* ixv_set_sysctl_value */
2864
2865 /************************************************************************
2866 * ixv_print_debug_info
2867 *
2868  * Called only when the "debug" sysctl (ixv_sysctl_debug) is set.
2869 * Provides a way to take a look at important statistics
2870 * maintained by the driver and hardware.
2871 ************************************************************************/
2872 static void
2873 ixv_print_debug_info(struct adapter *adapter)
2874 {
2875 device_t dev = adapter->dev;
2876 struct ix_queue *que = adapter->queues;
2877 struct rx_ring *rxr;
2878 struct tx_ring *txr;
2879 #ifdef LRO
2880 struct lro_ctrl *lro;
2881 #endif /* LRO */
2882
2883 for (int i = 0; i < adapter->num_queues; i++, que++) {
2884 txr = que->txr;
2885 rxr = que->rxr;
2886 #ifdef LRO
2887 lro = &rxr->lro;
2888 #endif /* LRO */
2889 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2890 		    que->msix, (unsigned long)que->irqs.ev_count);
2891 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
2892 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
2893 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2894 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
2895 #ifdef LRO
2896 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2897 rxr->me, (uintmax_t)lro->lro_queued);
2898 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2899 rxr->me, (uintmax_t)lro->lro_flushed);
2900 #endif /* LRO */
2901 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2902 		    txr->me, (unsigned long)txr->total_packets.ev_count);
2903 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2904 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
2905 }
2906
2907 	device_printf(dev, "Admin IRQ Handled: %lu\n",
2908 	    (unsigned long)adapter->admin_irqev.ev_count);
2909 	device_printf(dev, "Admin work Handled: %lu\n",
2910 	    (unsigned long)adapter->link_workev.ev_count);
2911 } /* ixv_print_debug_info */
2912
2913 /************************************************************************
2914 * ixv_sysctl_debug
2915 ************************************************************************/
2916 static int
2917 ixv_sysctl_debug(SYSCTLFN_ARGS)
2918 {
2919 struct sysctlnode node = *rnode;
2920 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2921 int error, result;
2922
2923 node.sysctl_data = &result;
2924 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2925
2926 if (error || newp == NULL)
2927 return error;
2928
2929 if (result == 1)
2930 ixv_print_debug_info(adapter);
2931
2932 return 0;
2933 } /* ixv_sysctl_debug */
2934
2935 /************************************************************************
2936 * ixv_init_device_features
2937 ************************************************************************/
2938 static void
2939 ixv_init_device_features(struct adapter *adapter)
2940 {
2941 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2942 | IXGBE_FEATURE_VF
2943 | IXGBE_FEATURE_RSS
2944 | IXGBE_FEATURE_LEGACY_TX;
2945
2946 /* A tad short on feature flags for VFs, atm. */
2947 switch (adapter->hw.mac.type) {
2948 case ixgbe_mac_82599_vf:
2949 break;
2950 case ixgbe_mac_X540_vf:
2951 break;
2952 case ixgbe_mac_X550_vf:
2953 case ixgbe_mac_X550EM_x_vf:
2954 case ixgbe_mac_X550EM_a_vf:
2955 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2956 break;
2957 default:
2958 break;
2959 }
2960
2961 /* Enabled by default... */
2962 /* Is a virtual function (VF) */
2963 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2964 adapter->feat_en |= IXGBE_FEATURE_VF;
2965 /* Netmap */
2966 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2967 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2968 /* Receive-Side Scaling (RSS) */
2969 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2970 adapter->feat_en |= IXGBE_FEATURE_RSS;
2971 /* Needs advanced context descriptor regardless of offloads req'd */
2972 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2973 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2974
2975 /* Enabled via sysctl... */
2976 /* Legacy (single queue) transmit */
2977 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2978 ixv_enable_legacy_tx)
2979 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2980 } /* ixv_init_device_features */
2981
2982 /************************************************************************
2983 * ixv_shutdown - Shutdown entry point
2984 ************************************************************************/
2985 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2986 static int
2987 ixv_shutdown(device_t dev)
2988 {
2989 struct adapter *adapter = device_private(dev);
2990 IXGBE_CORE_LOCK(adapter);
2991 ixv_stop_locked(adapter);
2992 IXGBE_CORE_UNLOCK(adapter);
2993
2994 return (0);
2995 } /* ixv_shutdown */
2996 #endif
2997
2998 static int
2999 ixv_ifflags_cb(struct ethercom *ec)
3000 {
3001 struct ifnet *ifp = &ec->ec_if;
3002 struct adapter *adapter = ifp->if_softc;
3003 u_short saved_flags;
3004 u_short change;
3005 int rv = 0;
3006
3007 IXGBE_CORE_LOCK(adapter);
3008
3009 saved_flags = adapter->if_flags;
3010 change = ifp->if_flags ^ adapter->if_flags;
3011 if (change != 0)
3012 adapter->if_flags = ifp->if_flags;
3013
3014 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3015 rv = ENETRESET;
3016 goto out;
3017 } else if ((change & IFF_PROMISC) != 0) {
3018 rv = ixv_set_rxfilter(adapter);
3019 if (rv != 0) {
3020 /* Restore previous */
3021 adapter->if_flags = saved_flags;
3022 goto out;
3023 }
3024 }
3025
3026 /* Check for ec_capenable. */
3027 change = ec->ec_capenable ^ adapter->ec_capenable;
3028 adapter->ec_capenable = ec->ec_capenable;
3029 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
3030 | ETHERCAP_VLAN_HWFILTER)) != 0) {
3031 rv = ENETRESET;
3032 goto out;
3033 }
3034
3035 /*
3036 * Special handling is not required for ETHERCAP_VLAN_MTU.
3037 	 * PF's MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
3038 */
3039
3040 /* Set up VLAN support and filter */
3041 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
3042 rv = ixv_setup_vlan_support(adapter);
3043
3044 out:
3045 IXGBE_CORE_UNLOCK(adapter);
3046
3047 return rv;
3048 }
3049
3050
3051 /************************************************************************
3052 * ixv_ioctl - Ioctl entry point
3053 *
3054 * Called when the user wants to configure the interface.
3055 *
3056 * return 0 on success, positive on failure
3057 ************************************************************************/
3058 static int
3059 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
3060 {
3061 struct adapter *adapter = ifp->if_softc;
3062 struct ixgbe_hw *hw = &adapter->hw;
3063 struct ifcapreq *ifcr = data;
3064 int error;
3065 int l4csum_en;
3066 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3067 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3068
3069 switch (command) {
3070 case SIOCSIFFLAGS:
3071 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
3072 break;
3073 case SIOCADDMULTI: {
3074 struct ether_multi *enm;
3075 struct ether_multistep step;
3076 struct ethercom *ec = &adapter->osdep.ec;
3077 bool overflow = false;
3078 int mcnt = 0;
3079
3080 /*
3081 		 * Check the number of multicast addresses. If it
3082 		 * exceeds the limit, return ENOSPC.
3083 * Update this code when we support API 1.3.
3084 */
3085 ETHER_LOCK(ec);
3086 ETHER_FIRST_MULTI(step, ec, enm);
3087 while (enm != NULL) {
3088 mcnt++;
3089
3090 /*
3091 			 * This runs before the address is added, so at
3092 			 * least one free slot must remain.
3093 */
3094 if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3095 overflow = true;
3096 break;
3097 }
3098 ETHER_NEXT_MULTI(step, enm);
3099 }
3100 ETHER_UNLOCK(ec);
3101 error = 0;
3102 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3103 error = hw->mac.ops.update_xcast_mode(hw,
3104 IXGBEVF_XCAST_MODE_ALLMULTI);
3105 if (error == IXGBE_ERR_NOT_TRUSTED) {
3106 device_printf(adapter->dev,
3107 "this interface is not trusted\n");
3108 error = EPERM;
3109 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3110 device_printf(adapter->dev,
3111 "the PF doesn't support allmulti mode\n");
3112 error = EOPNOTSUPP;
3113 } else if (error) {
3114 device_printf(adapter->dev,
3115 "number of Ethernet multicast addresses "
3116 "exceeds the limit (%d). error = %d\n",
3117 IXGBE_MAX_VF_MC, error);
3118 error = ENOSPC;
3119 } else
3120 ec->ec_flags |= ETHER_F_ALLMULTI;
3121 }
3122 if (error)
3123 return error;
3124 }
3125 /*FALLTHROUGH*/
3126 case SIOCDELMULTI:
3127 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3128 break;
3129 case SIOCSIFMEDIA:
3130 case SIOCGIFMEDIA:
3131 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3132 break;
3133 case SIOCSIFCAP:
3134 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3135 break;
3136 case SIOCSIFMTU:
3137 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3138 break;
3139 case SIOCZIFDATA:
3140 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3141 ixv_update_stats(adapter);
3142 ixv_clear_evcnt(adapter);
3143 break;
3144 default:
3145 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3146 break;
3147 }
3148
3149 switch (command) {
3150 case SIOCSIFCAP:
3151 /* Layer-4 Rx checksum offload has to be turned on and
3152 * off as a unit.
3153 */
3154 l4csum_en = ifcr->ifcr_capenable & l4csum;
3155 if (l4csum_en != l4csum && l4csum_en != 0)
3156 return EINVAL;
3157 /*FALLTHROUGH*/
3158 case SIOCADDMULTI:
3159 case SIOCDELMULTI:
3160 case SIOCSIFFLAGS:
3161 case SIOCSIFMTU:
3162 default:
3163 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3164 return error;
3165 if ((ifp->if_flags & IFF_RUNNING) == 0)
3166 ;
3167 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3168 IXGBE_CORE_LOCK(adapter);
3169 ixv_init_locked(adapter);
3170 IXGBE_CORE_UNLOCK(adapter);
3171 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3172 /*
3173 * Multicast list has changed; set the hardware filter
3174 * accordingly.
3175 */
3176 IXGBE_CORE_LOCK(adapter);
3177 ixv_disable_intr(adapter);
3178 ixv_set_rxfilter(adapter);
3179 ixv_enable_intr(adapter);
3180 IXGBE_CORE_UNLOCK(adapter);
3181 }
3182 return 0;
3183 }
3184 } /* ixv_ioctl */
3185
3186 /************************************************************************
3187 * ixv_init
3188 ************************************************************************/
3189 static int
3190 ixv_init(struct ifnet *ifp)
3191 {
3192 struct adapter *adapter = ifp->if_softc;
3193
3194 IXGBE_CORE_LOCK(adapter);
3195 ixv_init_locked(adapter);
3196 IXGBE_CORE_UNLOCK(adapter);
3197
3198 return 0;
3199 } /* ixv_init */
3200
3201 /************************************************************************
3202 * ixv_handle_que
3203 ************************************************************************/
3204 static void
3205 ixv_handle_que(void *context)
3206 {
3207 struct ix_queue *que = context;
3208 struct adapter *adapter = que->adapter;
3209 struct tx_ring *txr = que->txr;
3210 struct ifnet *ifp = adapter->ifp;
3211 bool more;
3212
3213 que->handleq.ev_count++;
3214
3215 if (ifp->if_flags & IFF_RUNNING) {
3216 more = ixgbe_rxeof(que);
3217 IXGBE_TX_LOCK(txr);
3218 more |= ixgbe_txeof(txr);
3219 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
3220 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3221 ixgbe_mq_start_locked(ifp, txr);
3222 /* Only for queue 0 */
3223 /* NetBSD still needs this for CBQ */
3224 if ((&adapter->queues[0] == que)
3225 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3226 ixgbe_legacy_start_locked(ifp, txr);
3227 IXGBE_TX_UNLOCK(txr);
3228 if (more) {
3229 que->req.ev_count++;
3230 if (adapter->txrx_use_workqueue) {
3231 /*
3232 				 * As in ixg(4), no "enqueued" flag is
3233 				 * required here. See ixgbe_msix_que().
3234 */
3235 workqueue_enqueue(adapter->que_wq,
3236 &que->wq_cookie, curcpu());
3237 } else
3238 softint_schedule(que->que_si);
3239 return;
3240 }
3241 }
3242
3243 /* Re-enable this interrupt */
3244 ixv_enable_queue(adapter, que->msix);
3245
3246 return;
3247 } /* ixv_handle_que */
3248
3249 /************************************************************************
3250 * ixv_handle_que_work
3251 ************************************************************************/
3252 static void
3253 ixv_handle_que_work(struct work *wk, void *context)
3254 {
3255 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3256
3257 /*
3258 	 * As in ixg(4), no "enqueued" flag is required here.
3259 * See ixgbe_msix_que().
3260 */
3261 ixv_handle_que(que);
3262 }
3263
3264 /************************************************************************
3265 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3266 ************************************************************************/
3267 static int
3268 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
3269 {
3270 device_t dev = adapter->dev;
3271 struct ix_queue *que = adapter->queues;
3272 struct tx_ring *txr = adapter->tx_rings;
3273 int error, msix_ctrl, rid, vector = 0;
3274 pci_chipset_tag_t pc;
3275 pcitag_t tag;
3276 char intrbuf[PCI_INTRSTR_LEN];
3277 char wqname[MAXCOMLEN];
3278 char intr_xname[32];
3279 const char *intrstr = NULL;
3280 kcpuset_t *affinity;
3281 int cpu_id = 0;
3282
3283 pc = adapter->osdep.pc;
3284 tag = adapter->osdep.tag;
3285
3286 adapter->osdep.nintrs = adapter->num_queues + 1;
3287 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3288 adapter->osdep.nintrs) != 0) {
3289 aprint_error_dev(dev,
3290 "failed to allocate MSI-X interrupt\n");
3291 return (ENXIO);
3292 }
3293
3294 kcpuset_create(&affinity, false);
3295 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3296 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3297 device_xname(dev), i);
3298 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3299 sizeof(intrbuf));
3300 #ifdef IXGBE_MPSAFE
3301 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3302 true);
3303 #endif
3304 /* Set the handler function */
3305 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3306 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3307 intr_xname);
3308 if (que->res == NULL) {
3309 pci_intr_release(pc, adapter->osdep.intrs,
3310 adapter->osdep.nintrs);
3311 aprint_error_dev(dev,
3312 "Failed to register QUE handler\n");
3313 kcpuset_destroy(affinity);
3314 return (ENXIO);
3315 }
3316 que->msix = vector;
3317 		adapter->active_queues |= (u64)1 << que->msix;
3318
3319 cpu_id = i;
3320 /* Round-robin affinity */
3321 kcpuset_zero(affinity);
3322 kcpuset_set(affinity, cpu_id % ncpu);
3323 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3324 NULL);
3325 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3326 intrstr);
3327 if (error == 0)
3328 aprint_normal(", bound queue %d to cpu %d\n",
3329 i, cpu_id % ncpu);
3330 else
3331 aprint_normal("\n");
3332
3333 #ifndef IXGBE_LEGACY_TX
3334 txr->txr_si
3335 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3336 ixgbe_deferred_mq_start, txr);
3337 #endif
3338 que->que_si
3339 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3340 ixv_handle_que, que);
3341 if (que->que_si == NULL) {
3342 aprint_error_dev(dev,
3343 "could not establish software interrupt\n");
3344 }
3345 }
3346 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3347 error = workqueue_create(&adapter->txr_wq, wqname,
3348 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3349 IXGBE_WORKQUEUE_FLAGS);
3350 if (error) {
3351 aprint_error_dev(dev,
3352 "couldn't create workqueue for deferred Tx\n");
3353 }
3354 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3355
3356 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3357 error = workqueue_create(&adapter->que_wq, wqname,
3358 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3359 IXGBE_WORKQUEUE_FLAGS);
3360 if (error) {
3361 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
3362 }
3363
3364 /* and Mailbox */
3365 cpu_id++;
3366 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3367 adapter->vector = vector;
3368 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3369 sizeof(intrbuf));
3370 #ifdef IXGBE_MPSAFE
3371 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3372 true);
3373 #endif
3374 /* Set the mbx handler function */
3375 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3376 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3377 intr_xname);
3378 if (adapter->osdep.ihs[vector] == NULL) {
3379 aprint_error_dev(dev, "Failed to register LINK handler\n");
3380 kcpuset_destroy(affinity);
3381 return (ENXIO);
3382 }
3383 /* Round-robin affinity */
3384 kcpuset_zero(affinity);
3385 kcpuset_set(affinity, cpu_id % ncpu);
3386 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
3387 NULL);
3388
3389 aprint_normal_dev(dev,
3390 "for link, interrupting at %s", intrstr);
3391 if (error == 0)
3392 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3393 else
3394 aprint_normal("\n");
3395
3396 /* Tasklets for Mailbox */
3397 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
3398 error = workqueue_create(&adapter->admin_wq, wqname,
3399 ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3400 IXGBE_TASKLET_WQ_FLAGS);
3401 if (error) {
3402 aprint_error_dev(dev,
3403 "could not create admin workqueue (%d)\n", error);
3404 goto err_out;
3405 }
3406
3407 /*
3408 	 * Due to a broken design, QEMU will fail to properly
3409 * enable the guest for MSI-X unless the vectors in
3410 * the table are all set up, so we must rewrite the
3411 * ENABLE in the MSI-X control register again at this
3412 * point to cause it to successfully initialize us.
3413 */
3414 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3415 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3416 rid += PCI_MSIX_CTL;
3417 msix_ctrl = pci_conf_read(pc, tag, rid);
3418 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3419 pci_conf_write(pc, tag, rid, msix_ctrl);
3420 }
3421
3422 kcpuset_destroy(affinity);
3423 return (0);
3424 err_out:
3425 kcpuset_destroy(affinity);
3426 ixv_free_deferred_handlers(adapter);
3427 ixv_free_pci_resources(adapter);
3428 return (error);
3429 } /* ixv_allocate_msix */
3430
3431 /************************************************************************
3432 * ixv_configure_interrupts - Setup MSI-X resources
3433 *
3434 * Note: The VF device MUST use MSI-X, there is no fallback.
3435 ************************************************************************/
3436 static int
3437 ixv_configure_interrupts(struct adapter *adapter)
3438 {
3439 device_t dev = adapter->dev;
3440 int want, queues, msgs;
3441
3442 /* Must have at least 2 MSI-X vectors */
3443 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3444 if (msgs < 2) {
3445 		aprint_error_dev(dev, "MSI-X config error\n");
3446 return (ENXIO);
3447 }
3448 msgs = MIN(msgs, IXG_MAX_NINTR);
3449
3450 /* Figure out a reasonable auto config value */
3451 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3452
3453 if (ixv_num_queues != 0)
3454 queues = ixv_num_queues;
3455 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
3456 queues = IXGBE_VF_MAX_TX_QUEUES;
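	/*
	 * Example (illustrative): with 4 CPUs and only 3 MSI-X vectors
	 * available, the heuristic above picks 2 queue vectors, leaving
	 * one vector for the mailbox below.
	 */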
3457
3458 /*
3459 * Want vectors for the queues,
3460 * plus an additional for mailbox.
3461 */
3462 want = queues + 1;
3463 if (msgs >= want)
3464 msgs = want;
3465 else {
3466 aprint_error_dev(dev,
3467 "MSI-X Configuration Problem, "
3468 		    "%d vectors available but %d (queues + mailbox) wanted!\n",
3469 msgs, want);
3470 return -1;
3471 }
3472
3473 adapter->msix_mem = (void *)1; /* XXX */
3474 aprint_normal_dev(dev,
3475 "Using MSI-X interrupts with %d vectors\n", msgs);
3476 adapter->num_queues = queues;
3477
3478 return (0);
3479 } /* ixv_configure_interrupts */
3480
3481
3482 /************************************************************************
3483 * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
3484 *
3485 * Done outside of interrupt context since the driver might sleep
3486 ************************************************************************/
3487 static void
3488 ixv_handle_admin(struct work *wk, void *context)
3489 {
3490 struct adapter *adapter = context;
3491 struct ixgbe_hw *hw = &adapter->hw;
3492
3493 IXGBE_CORE_LOCK(adapter);
3494
3495 ++adapter->link_workev.ev_count;
3496 	hw->mac.ops.check_link(hw, &adapter->link_speed,
3497 	    &adapter->link_up, FALSE);
3498 ixv_update_link_status(adapter);
3499
3500 adapter->task_requests = 0;
3501 atomic_store_relaxed(&adapter->admin_pending, 0);
3502
3503 /* Re-enable interrupts */
3504 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
3505
3506 IXGBE_CORE_UNLOCK(adapter);
3507 } /* ixv_handle_admin */
3508
3509 /************************************************************************
3510 * ixv_check_link - Used in the local timer to poll for link changes
3511 ************************************************************************/
3512 static s32
3513 ixv_check_link(struct adapter *adapter)
3514 {
3515 s32 error;
3516
3517 KASSERT(mutex_owned(&adapter->core_mtx));
3518
3519 adapter->hw.mac.get_link_status = TRUE;
3520
3521 error = adapter->hw.mac.ops.check_link(&adapter->hw,
3522 &adapter->link_speed, &adapter->link_up, FALSE);
3523 ixv_update_link_status(adapter);
3524
3525 return error;
3526 } /* ixv_check_link */
3527