ixv.c revision 1.91 1 /*$NetBSD: ixv.c,v 1.91 2018/04/04 08:13:07 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 328265 2018-01-22 20:56:21Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
47 /************************************************************************
48 * Driver version
49 ************************************************************************/
50 char ixv_driver_version[] = "1.5.13-k";
51
52 /************************************************************************
53 * PCI Device ID Table
54 *
55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s
58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/
61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
62 {
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */
69 {0, 0, 0, 0, 0}
70 };
71
72 /************************************************************************
73 * Table of branding strings
74 ************************************************************************/
75 static const char *ixv_strings[] = {
76 "Intel(R) PRO/10GbE Virtual Function Network Driver"
77 };
78
79 /*********************************************************************
80 * Function prototypes
81 *********************************************************************/
82 static int ixv_probe(device_t, cfdata_t, void *);
83 static void ixv_attach(device_t, device_t, void *);
84 static int ixv_detach(device_t, int);
85 #if 0
86 static int ixv_shutdown(device_t);
87 #endif
88 static int ixv_ifflags_cb(struct ethercom *);
89 static int ixv_ioctl(struct ifnet *, u_long, void *);
90 static int ixv_init(struct ifnet *);
91 static void ixv_init_locked(struct adapter *);
92 static void ixv_ifstop(struct ifnet *, int);
93 static void ixv_stop(void *);
94 static void ixv_init_device_features(struct adapter *);
95 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96 static int ixv_media_change(struct ifnet *);
97 static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *);
99 static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *);
101 static int ixv_configure_interrupts(struct adapter *);
102 static void ixv_free_pci_resources(struct adapter *);
103 static void ixv_local_timer(void *);
104 static void ixv_local_timer_locked(void *);
105 static int ixv_setup_interface(device_t, struct adapter *);
106 static int ixv_negotiate_api(struct adapter *);
107
108 static void ixv_initialize_transmit_units(struct adapter *);
109 static void ixv_initialize_receive_units(struct adapter *);
110 static void ixv_initialize_rss_mapping(struct adapter *);
111 static void ixv_check_link(struct adapter *);
112
113 static void ixv_enable_intr(struct adapter *);
114 static void ixv_disable_intr(struct adapter *);
115 static void ixv_set_multi(struct adapter *);
116 static void ixv_update_link_status(struct adapter *);
117 static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119 static void ixv_configure_ivars(struct adapter *);
120 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121 static void ixv_eitr_write(struct ix_queue *, uint32_t);
122
123 static void ixv_setup_vlan_support(struct adapter *);
124 #if 0
125 static void ixv_register_vlan(void *, struct ifnet *, u16);
126 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
127 #endif
128
129 static void ixv_add_device_sysctls(struct adapter *);
130 static void ixv_save_stats(struct adapter *);
131 static void ixv_init_stats(struct adapter *);
132 static void ixv_update_stats(struct adapter *);
133 static void ixv_add_stats_sysctls(struct adapter *);
134
135
136 /* Sysctl handlers */
137 static void ixv_set_sysctl_value(struct adapter *, const char *,
138 const char *, int *, int);
139 static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
140 static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
141 static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
142 static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
143 static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
144
145 /* The MSI-X Interrupt handlers */
146 static int ixv_msix_que(void *);
147 static int ixv_msix_mbx(void *);
148
149 /* Deferred interrupt tasklets */
150 static void ixv_handle_que(void *);
151 static void ixv_handle_link(void *);
152
153 /* Workqueue handler for deferred work */
154 static void ixv_handle_que_work(struct work *, void *);
155
156 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
157 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
158
159 /************************************************************************
160 * FreeBSD Device Interface Entry Points
161 ************************************************************************/
162 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
163 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
164 DVF_DETACH_SHUTDOWN);
165
166 #if 0
167 static driver_t ixv_driver = {
168 "ixv", ixv_methods, sizeof(struct adapter),
169 };
170
171 devclass_t ixv_devclass;
172 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
173 MODULE_DEPEND(ixv, pci, 1, 1, 1);
174 MODULE_DEPEND(ixv, ether, 1, 1, 1);
175 #endif
176
177 /*
178 * TUNEABLE PARAMETERS:
179 */
180
181 /* Number of Queues - do not exceed MSI-X vectors - 1 */
182 static int ixv_num_queues = 0;
183 #define TUNABLE_INT(__x, __y)
184 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
185
186 /*
187 * AIM: Adaptive Interrupt Moderation
188 * which means that the interrupt rate
189 * is varied over time based on the
190 * traffic for that interrupt vector
191 */
192 static bool ixv_enable_aim = false;
193 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
194
195 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
196 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
197
198 /* How many packets rxeof tries to clean at a time */
199 static int ixv_rx_process_limit = 256;
200 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201
202 /* How many packets txeof tries to clean at a time */
203 static int ixv_tx_process_limit = 256;
204 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
205
206 /* Whether packet processing runs in a workqueue or a softint */
207 static bool ixv_txrx_workqueue = false;
208
209 /*
210 * Number of TX descriptors per ring,
211 * setting higher than RX as this seems
212 * the better performing choice.
213 */
214 static int ixv_txd = PERFORM_TXD;
215 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
216
217 /* Number of RX descriptors per ring */
218 static int ixv_rxd = PERFORM_RXD;
219 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
220
221 /* Legacy Transmit (single queue) */
222 static int ixv_enable_legacy_tx = 0;
223 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
224
225 #ifdef NET_MPSAFE
226 #define IXGBE_MPSAFE 1
227 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
228 #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
229 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
230 #else
231 #define IXGBE_CALLOUT_FLAGS 0
232 #define IXGBE_SOFTINFT_FLAGS 0
233 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
234 #endif
235 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
236
237 #if 0
238 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
239 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
240 #endif
241
242 /************************************************************************
243 * ixv_probe - Device identification routine
244 *
245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID.
247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/
250 static int
251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
252 {
253 #ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux;
255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257 #else
258 return 0;
259 #endif
260 } /* ixv_probe */
261
262 static ixgbe_vendor_info_t *
263 ixv_lookup(const struct pci_attach_args *pa)
264 {
265 ixgbe_vendor_info_t *ent;
266 pcireg_t subid;
267
268 INIT_DEBUGOUT("ixv_lookup: begin");
269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL;
272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) {
282 return ent;
283 }
284 }
285
286 return NULL;
287 }
288
289 /************************************************************************
290 * ixv_attach - Device initialization routine
291 *
292 * Called when the driver is being loaded.
293 * Identifies the type of hardware, allocates all resources
294 * and initializes the hardware.
295 *
296 * return 0 on success, positive on failure
297 ************************************************************************/
298 static void
299 ixv_attach(device_t parent, device_t dev, void *aux)
300 {
301 struct adapter *adapter;
302 struct ixgbe_hw *hw;
303 int error = 0;
304 pcireg_t id, subid;
305 ixgbe_vendor_info_t *ent;
306 const struct pci_attach_args *pa = aux;
307 const char *apivstr;
308 const char *str;
309 char buf[256];
310
311 INIT_DEBUGOUT("ixv_attach: begin");
312
313 /*
314 * Make sure BUSMASTER is set, on a VM under
315 * KVM it may not be and will break things.
316 */
317 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
318
319 /* Allocate, clear, and link in our adapter structure */
320 adapter = device_private(dev);
321 adapter->dev = dev;
322 adapter->hw.back = adapter;
323 hw = &adapter->hw;
324
325 adapter->init_locked = ixv_init_locked;
326 adapter->stop_locked = ixv_stop;
327
328 adapter->osdep.pc = pa->pa_pc;
329 adapter->osdep.tag = pa->pa_tag;
330 if (pci_dma64_available(pa))
331 adapter->osdep.dmat = pa->pa_dmat64;
332 else
333 adapter->osdep.dmat = pa->pa_dmat;
334 adapter->osdep.attached = false;
335
336 ent = ixv_lookup(pa);
337
338 KASSERT(ent != NULL);
339
340 aprint_normal(": %s, Version - %s\n",
341 ixv_strings[ent->index], ixv_driver_version);
342
343 /* Core Lock Init*/
344 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
345
346 /* Do base PCI setup - map BAR0 */
347 if (ixv_allocate_pci_resources(adapter, pa)) {
348 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
349 error = ENXIO;
350 goto err_out;
351 }
352
353 /* SYSCTL APIs */
354 ixv_add_device_sysctls(adapter);
355
356 /* Set up the timer callout */
357 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
358
359 /* Save off the information about this board */
360 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
361 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
362 hw->vendor_id = PCI_VENDOR(id);
363 hw->device_id = PCI_PRODUCT(id);
364 hw->revision_id =
365 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
366 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
367 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
368
369 /* A subset of set_mac_type */
370 switch (hw->device_id) {
371 case IXGBE_DEV_ID_82599_VF:
372 hw->mac.type = ixgbe_mac_82599_vf;
373 str = "82599 VF";
374 break;
375 case IXGBE_DEV_ID_X540_VF:
376 hw->mac.type = ixgbe_mac_X540_vf;
377 str = "X540 VF";
378 break;
379 case IXGBE_DEV_ID_X550_VF:
380 hw->mac.type = ixgbe_mac_X550_vf;
381 str = "X550 VF";
382 break;
383 case IXGBE_DEV_ID_X550EM_X_VF:
384 hw->mac.type = ixgbe_mac_X550EM_x_vf;
385 str = "X550EM X VF";
386 break;
387 case IXGBE_DEV_ID_X550EM_A_VF:
388 hw->mac.type = ixgbe_mac_X550EM_a_vf;
389 str = "X550EM A VF";
390 break;
391 default:
392 /* Shouldn't get here since probe succeeded */
393 aprint_error_dev(dev, "Unknown device ID!\n");
394 error = ENXIO;
395 goto err_out;
396 break;
397 }
398 aprint_normal_dev(dev, "device %s\n", str);
399
400 ixv_init_device_features(adapter);
401
402 /* Initialize the shared code */
403 error = ixgbe_init_ops_vf(hw);
404 if (error) {
405 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
406 error = EIO;
407 goto err_out;
408 }
409
410 /* Setup the mailbox */
411 ixgbe_init_mbx_params_vf(hw);
412
413 /* Set the right number of segments */
414 adapter->num_segs = IXGBE_82599_SCATTER;
415
416 /* Reset mbox api to 1.0 */
417 error = hw->mac.ops.reset_hw(hw);
418 if (error == IXGBE_ERR_RESET_FAILED)
419 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
420 else if (error)
421 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
422 error);
423 if (error) {
424 error = EIO;
425 goto err_out;
426 }
427
428 error = hw->mac.ops.init_hw(hw);
429 if (error) {
430 aprint_error_dev(dev, "...init_hw() failed!\n");
431 error = EIO;
432 goto err_out;
433 }
434
435 /* Negotiate mailbox API version */
436 error = ixv_negotiate_api(adapter);
437 if (error)
438 aprint_normal_dev(dev,
439 "MBX API negotiation failed during attach!\n");
440 switch (hw->api_version) {
441 case ixgbe_mbox_api_10:
442 apivstr = "1.0";
443 break;
444 case ixgbe_mbox_api_20:
445 apivstr = "2.0";
446 break;
447 case ixgbe_mbox_api_11:
448 apivstr = "1.1";
449 break;
450 case ixgbe_mbox_api_12:
451 apivstr = "1.2";
452 break;
453 case ixgbe_mbox_api_13:
454 apivstr = "1.3";
455 break;
456 default:
457 apivstr = "unknown";
458 break;
459 }
460 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
461
462 /* If no mac address was assigned, make a random one */
463 if (!ixv_check_ether_addr(hw->mac.addr)) {
464 u8 addr[ETHER_ADDR_LEN];
465 uint64_t rndval = cprng_strong64();
466
467 memcpy(addr, &rndval, sizeof(addr));
468 addr[0] &= 0xFE;
469 addr[0] |= 0x02;
470 bcopy(addr, hw->mac.addr, sizeof(addr));
471 }
472
473 /* Register for VLAN events */
474 #if 0 /* XXX delete after write? */
475 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
476 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
477 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
478 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
479 #endif
480
481 /* Sysctls for limiting the amount of work done in the taskqueues */
482 ixv_set_sysctl_value(adapter, "rx_processing_limit",
483 "max number of rx packets to process",
484 &adapter->rx_process_limit, ixv_rx_process_limit);
485
486 ixv_set_sysctl_value(adapter, "tx_processing_limit",
487 "max number of tx packets to process",
488 &adapter->tx_process_limit, ixv_tx_process_limit);
489
490 /* Do descriptor calc and sanity checks */
491 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
492 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
493 aprint_error_dev(dev, "TXD config issue, using default!\n");
494 adapter->num_tx_desc = DEFAULT_TXD;
495 } else
496 adapter->num_tx_desc = ixv_txd;
497
498 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
499 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
500 aprint_error_dev(dev, "RXD config issue, using default!\n");
501 adapter->num_rx_desc = DEFAULT_RXD;
502 } else
503 adapter->num_rx_desc = ixv_rxd;
504
505 /* Setup MSI-X */
506 error = ixv_configure_interrupts(adapter);
507 if (error)
508 goto err_out;
509
510 /* Allocate our TX/RX Queues */
511 if (ixgbe_allocate_queues(adapter)) {
512 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
513 error = ENOMEM;
514 goto err_out;
515 }
516
517 /* hw.ix defaults init */
518 adapter->enable_aim = ixv_enable_aim;
519
520 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
521
522 error = ixv_allocate_msix(adapter, pa);
523 if (error) {
524 device_printf(dev, "ixv_allocate_msix() failed!\n");
525 goto err_late;
526 }
527
528 /* Setup OS specific network interface */
529 error = ixv_setup_interface(dev, adapter);
530 if (error != 0) {
531 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
532 goto err_late;
533 }
534
535 /* Do the stats setup */
536 ixv_save_stats(adapter);
537 ixv_init_stats(adapter);
538 ixv_add_stats_sysctls(adapter);
539
540 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
541 ixgbe_netmap_attach(adapter);
542
543 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
544 aprint_verbose_dev(dev, "feature cap %s\n", buf);
545 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
546 aprint_verbose_dev(dev, "feature ena %s\n", buf);
547
548 INIT_DEBUGOUT("ixv_attach: end");
549 adapter->osdep.attached = true;
550
551 return;
552
553 err_late:
554 ixgbe_free_transmit_structures(adapter);
555 ixgbe_free_receive_structures(adapter);
556 free(adapter->queues, M_DEVBUF);
557 err_out:
558 ixv_free_pci_resources(adapter);
559 IXGBE_CORE_LOCK_DESTROY(adapter);
560
561 return;
562 } /* ixv_attach */
563
/************************************************************************
 * ixv_detach - Device removal routine
 *
 * Called when the driver is being removed.
 * Stops the adapter and deallocates all the resources
 * that were allocated for driver operation.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to unwind if ixv_attach() bailed out early. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Polite detach: refuse while VLANs still reference us. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		/* txr_si is only established for multiqueue (non-legacy) TX */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* Workqueues may not have been created; guard each one. */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for any in-flight watchdog callout to finish. */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach the adapter-wide event counters. */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Detach the per-queue/per-ring counters; txr was advanced by
	 * the softint loop above, so rewind it first. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload counters */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	/* Destroy each queue's disabled-count mutex before freeing. */
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
705
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 * Used in two ways: It is used by the stack as an init entry
 * point in network interface structure. It is also used
 * by the driver as a hw/sw initialization routine to get
 * to a consistent state.
 *
 * Caller must hold the core lock (asserted below).
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the hardware and the watchdog before reprogramming. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* Write RAR[0] again with the (possibly updated) address. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: the mailbox vector plus every queue vector. */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
830
831 /************************************************************************
832 * ixv_enable_queue
833 ************************************************************************/
834 static inline void
835 ixv_enable_queue(struct adapter *adapter, u32 vector)
836 {
837 struct ixgbe_hw *hw = &adapter->hw;
838 struct ix_queue *que = &adapter->queues[vector];
839 u32 queue = 1 << vector;
840 u32 mask;
841
842 mutex_enter(&que->dc_mtx);
843 if (que->disabled_count > 0 && --que->disabled_count > 0)
844 goto out;
845
846 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
847 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
848 out:
849 mutex_exit(&que->dc_mtx);
850 } /* ixv_enable_queue */
851
852 /************************************************************************
853 * ixv_disable_queue
854 ************************************************************************/
855 static inline void
856 ixv_disable_queue(struct adapter *adapter, u32 vector)
857 {
858 struct ixgbe_hw *hw = &adapter->hw;
859 struct ix_queue *que = &adapter->queues[vector];
860 u64 queue = (u64)(1 << vector);
861 u32 mask;
862
863 mutex_enter(&que->dc_mtx);
864 if (que->disabled_count++ > 0)
865 goto out;
866
867 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
868 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
869 out:
870 mutex_exit(&que->dc_mtx);
871 } /* ixv_disable_queue */
872
873 static inline void
874 ixv_rearm_queues(struct adapter *adapter, u64 queues)
875 {
876 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
877 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
878 } /* ixv_rearm_queues */
879
880
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 * Masks the queue vector, reaps TX completions, and either defers
 * the rest of the work to the queue softint or re-enables the
 * vector directly.  Optionally recomputes the interrupt moderation
 * (EITR) value from the traffic observed since the last interrupt.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector; re-enabled below or by the softint path. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 * the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet over the interval, TX and RX;
	 * take the larger of the two. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* On NetBSD "more" is always true here, so RX processing is
	 * always deferred to the softint. */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
971
972 /************************************************************************
973 * ixv_msix_mbx
974 ************************************************************************/
/*
 * MSI-X mailbox/link interrupt handler.
 *
 * Records the event, flags that the link state must be re-read, and
 * defers the actual mailbox work to the link softint.  Returns 1 to
 * claim the interrupt.
 */
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change: softint re-reads it via the mailbox. */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable this (mailbox) interrupt vector. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
992
993 static void
994 ixv_eitr_write(struct ix_queue *que, uint32_t itr)
995 {
996 struct adapter *adapter = que->adapter;
997
998 /*
999 * Newer devices than 82598 have VF function, so this function is
1000 * simple.
1001 */
1002 itr |= IXGBE_EITR_CNT_WDIS;
1003
1004 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), itr);
1005 }
1006
1007
1008 /************************************************************************
1009 * ixv_media_status - Media Ioctl callback
1010 *
1011 * Called whenever the user queries the status of
1012 * the interface using ifconfig.
1013 ************************************************************************/
1014 static void
1015 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1016 {
1017 struct adapter *adapter = ifp->if_softc;
1018
1019 INIT_DEBUGOUT("ixv_media_status: begin");
1020 IXGBE_CORE_LOCK(adapter);
1021 ixv_update_link_status(adapter);
1022
1023 ifmr->ifm_status = IFM_AVALID;
1024 ifmr->ifm_active = IFM_ETHER;
1025
1026 if (!adapter->link_active) {
1027 ifmr->ifm_active |= IFM_NONE;
1028 IXGBE_CORE_UNLOCK(adapter);
1029 return;
1030 }
1031
1032 ifmr->ifm_status |= IFM_ACTIVE;
1033
1034 switch (adapter->link_speed) {
1035 case IXGBE_LINK_SPEED_10GB_FULL:
1036 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1037 break;
1038 case IXGBE_LINK_SPEED_5GB_FULL:
1039 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1040 break;
1041 case IXGBE_LINK_SPEED_2_5GB_FULL:
1042 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1043 break;
1044 case IXGBE_LINK_SPEED_1GB_FULL:
1045 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1046 break;
1047 case IXGBE_LINK_SPEED_100_FULL:
1048 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1049 break;
1050 case IXGBE_LINK_SPEED_10_FULL:
1051 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1052 break;
1053 }
1054
1055 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1056
1057 IXGBE_CORE_UNLOCK(adapter);
1058 } /* ixv_media_status */
1059
1060 /************************************************************************
1061 * ixv_media_change - Media Ioctl callback
1062 *
1063 * Called when the user changes speed/duplex using
1064 * media/mediopt option with ifconfig.
1065 ************************************************************************/
1066 static int
1067 ixv_media_change(struct ifnet *ifp)
1068 {
1069 struct adapter *adapter = ifp->if_softc;
1070 struct ifmedia *ifm = &adapter->media;
1071
1072 INIT_DEBUGOUT("ixv_media_change: begin");
1073
1074 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1075 return (EINVAL);
1076
1077 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1078 case IFM_AUTO:
1079 break;
1080 default:
1081 device_printf(adapter->dev, "Only auto media type\n");
1082 return (EINVAL);
1083 }
1084
1085 return (0);
1086 } /* ixv_media_change */
1087
1088
1089 /************************************************************************
1090 * ixv_negotiate_api
1091 *
1092 * Negotiate the Mailbox API with the PF;
1093 * start with the most featured API first.
1094 ************************************************************************/
1095 static int
1096 ixv_negotiate_api(struct adapter *adapter)
1097 {
1098 struct ixgbe_hw *hw = &adapter->hw;
1099 int mbx_api[] = { ixgbe_mbox_api_11,
1100 ixgbe_mbox_api_10,
1101 ixgbe_mbox_api_unknown };
1102 int i = 0;
1103
1104 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1105 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1106 return (0);
1107 i++;
1108 }
1109
1110 return (EINVAL);
1111 } /* ixv_negotiate_api */
1112
1113
1114 /************************************************************************
1115 * ixv_set_multi - Multicast Update
1116 *
1117 * Called whenever multicast address list is updated.
1118 ************************************************************************/
1119 static void
1120 ixv_set_multi(struct adapter *adapter)
1121 {
1122 struct ether_multi *enm;
1123 struct ether_multistep step;
1124 struct ethercom *ec = &adapter->osdep.ec;
1125 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1126 u8 *update_ptr;
1127 int mcnt = 0;
1128
1129 KASSERT(mutex_owned(&adapter->core_mtx));
1130 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1131
1132 ETHER_LOCK(ec);
1133 ETHER_FIRST_MULTI(step, ec, enm);
1134 while (enm != NULL) {
1135 bcopy(enm->enm_addrlo,
1136 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1137 IXGBE_ETH_LENGTH_OF_ADDRESS);
1138 mcnt++;
1139 /* XXX This might be required --msaitoh */
1140 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1141 break;
1142 ETHER_NEXT_MULTI(step, enm);
1143 }
1144 ETHER_UNLOCK(ec);
1145
1146 update_ptr = mta;
1147
1148 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1149 ixv_mc_array_itr, TRUE);
1150 } /* ixv_set_multi */
1151
1152 /************************************************************************
1153 * ixv_mc_array_itr
1154 *
1155 * An iterator function needed by the multicast shared code.
1156 * It feeds the shared code routine the addresses in the
1157 * array of ixv_set_multi() one by one.
1158 ************************************************************************/
1159 static u8 *
1160 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1161 {
1162 u8 *addr = *update_ptr;
1163 u8 *newptr;
1164
1165 *vmdq = 0;
1166
1167 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1168 *update_ptr = newptr;
1169
1170 return addr;
1171 } /* ixv_mc_array_itr */
1172
1173 /************************************************************************
1174 * ixv_local_timer - Timer routine
1175 *
1176 * Checks for link status, updates statistics,
1177 * and runs the watchdog check.
1178 ************************************************************************/
/*
 * Callout entry point: take the core lock and run the real timer body.
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1188
/*
 * Timer body (core lock held): checks link, refreshes statistics,
 * aggregates per-ring TX error counters, and runs the watchdog.
 * Reschedules itself every hz ticks unless all queues are hung, in
 * which case the interface is reset.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending TX work */
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;		/* how many queues look hung this pass */
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Sum the per-ring DMA-setup failure counters into the evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): this tests que->busy, but the hang
		 * marker below is stored in que->txr->busy — looks
		 * inconsistent; verify against the txeof code and
		 * upstream if_ixv.c.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1282
1283 /************************************************************************
1284 * ixv_update_link_status - Update OS on link state
1285 *
1286 * Note: Only updates the OS on the cached link state.
1287 * The real check of the hardware only happens with
1288 * a link interrupt.
1289 ************************************************************************/
1290 static void
1291 ixv_update_link_status(struct adapter *adapter)
1292 {
1293 struct ifnet *ifp = adapter->ifp;
1294 device_t dev = adapter->dev;
1295
1296 KASSERT(mutex_owned(&adapter->core_mtx));
1297
1298 if (adapter->link_up) {
1299 if (adapter->link_active == FALSE) {
1300 if (bootverbose) {
1301 const char *bpsmsg;
1302
1303 switch (adapter->link_speed) {
1304 case IXGBE_LINK_SPEED_10GB_FULL:
1305 bpsmsg = "10 Gbps";
1306 break;
1307 case IXGBE_LINK_SPEED_5GB_FULL:
1308 bpsmsg = "5 Gbps";
1309 break;
1310 case IXGBE_LINK_SPEED_2_5GB_FULL:
1311 bpsmsg = "2.5 Gbps";
1312 break;
1313 case IXGBE_LINK_SPEED_1GB_FULL:
1314 bpsmsg = "1 Gbps";
1315 break;
1316 case IXGBE_LINK_SPEED_100_FULL:
1317 bpsmsg = "100 Mbps";
1318 break;
1319 case IXGBE_LINK_SPEED_10_FULL:
1320 bpsmsg = "10 Mbps";
1321 break;
1322 default:
1323 bpsmsg = "unknown speed";
1324 break;
1325 }
1326 device_printf(dev, "Link is up %s %s \n",
1327 bpsmsg, "Full Duplex");
1328 }
1329 adapter->link_active = TRUE;
1330 if_link_state_change(ifp, LINK_STATE_UP);
1331 }
1332 } else { /* Link down */
1333 if (adapter->link_active == TRUE) {
1334 if (bootverbose)
1335 device_printf(dev, "Link is Down\n");
1336 if_link_state_change(ifp, LINK_STATE_DOWN);
1337 adapter->link_active = FALSE;
1338 }
1339 }
1340 } /* ixv_update_link_status */
1341
1342
1343 /************************************************************************
1344 * ixv_stop - Stop the hardware
1345 *
1346 * Disables all traffic on the adapter by issuing a
1347 * global reset on the MAC and deallocates TX/RX buffers.
1348 ************************************************************************/
/*
 * if_stop callback: wrap ixv_stop() with the core lock.
 * The 'disable' argument is ignored.
 */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1358
/*
 * Stop the hardware (core lock held): disable interrupts, mark the
 * interface down, reset and stop the MAC, and cancel the timer.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped between reset and stop — presumably so
	 * stop_adapter performs its full sequence; verify in shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1386
1387
1388 /************************************************************************
1389 * ixv_allocate_pci_resources
1390 ************************************************************************/
/*
 * Map BAR0 (the device register window) and pick up the tunable queue
 * count.  Returns 0 on success, ENXIO if the BAR is missing or cannot
 * be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* Shared error path for both failure cases above. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1431
1432 /************************************************************************
1433 * ixv_free_pci_resources
1434 ************************************************************************/
1435 static void
1436 ixv_free_pci_resources(struct adapter * adapter)
1437 {
1438 struct ix_queue *que = adapter->queues;
1439 int rid;
1440
1441 /*
1442 * Release all msix queue resources:
1443 */
1444 for (int i = 0; i < adapter->num_queues; i++, que++) {
1445 if (que->res != NULL)
1446 pci_intr_disestablish(adapter->osdep.pc,
1447 adapter->osdep.ihs[i]);
1448 }
1449
1450
1451 /* Clean the Mailbox interrupt last */
1452 rid = adapter->vector;
1453
1454 if (adapter->osdep.ihs[rid] != NULL) {
1455 pci_intr_disestablish(adapter->osdep.pc,
1456 adapter->osdep.ihs[rid]);
1457 adapter->osdep.ihs[rid] = NULL;
1458 }
1459
1460 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1461 adapter->osdep.nintrs);
1462
1463 if (adapter->osdep.mem_size != 0) {
1464 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1465 adapter->osdep.mem_bus_space_handle,
1466 adapter->osdep.mem_size);
1467 }
1468
1469 return;
1470 } /* ixv_free_pci_resources */
1471
1472 /************************************************************************
1473 * ixv_setup_interface
1474 *
1475 * Setup networking device structure and register an interface.
1476 ************************************************************************/
/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 *   Fills in the ifnet callbacks, attaches the ethernet layer,
 *   advertises checksum/TSO/VLAN capabilities, and registers the
 *   (auto-only) media.  Returns 0 or the if_initialize() error.
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit unless the legacy-TX feature is enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach order matters: if_initialize, percpuq, ether, register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_TSOv4
	                     |  IFCAP_TSOv6;
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	                    |  ETHERCAP_VLAN_HWCSUM
	                    |  ETHERCAP_JUMBO_MTU
	                    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1563
1564
1565 /************************************************************************
1566 * ixv_initialize_transmit_units - Enable transmit unit.
1567 ************************************************************************/
1568 static void
1569 ixv_initialize_transmit_units(struct adapter *adapter)
1570 {
1571 struct tx_ring *txr = adapter->tx_rings;
1572 struct ixgbe_hw *hw = &adapter->hw;
1573 int i;
1574
1575 for (i = 0; i < adapter->num_queues; i++, txr++) {
1576 u64 tdba = txr->txdma.dma_paddr;
1577 u32 txctrl, txdctl;
1578 int j = txr->me;
1579
1580 /* Set WTHRESH to 8, burst writeback */
1581 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1582 txdctl |= (8 << 16);
1583 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1584
1585 /* Set the HW Tx Head and Tail indices */
1586 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1587 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1588
1589 /* Set Tx Tail register */
1590 txr->tail = IXGBE_VFTDT(j);
1591
1592 /* Set Ring parameters */
1593 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1594 (tdba & 0x00000000ffffffffULL));
1595 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1596 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1597 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1598 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1599 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1600 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1601
1602 /* Now enable */
1603 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1604 txdctl |= IXGBE_TXDCTL_ENABLE;
1605 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1606 }
1607
1608 return;
1609 } /* ixv_initialize_transmit_units */
1610
1611
1612 /************************************************************************
1613 * ixv_initialize_rss_mapping
1614 ************************************************************************/
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Program the VF RSS key, redirection table (RETA), and hash-field
 *   selection (VFMRQC).  Hash types the VF hardware cannot do are
 *   reported but otherwise ignored.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/*
	 * NOTE(review): on NetBSD the stack key is used unconditionally,
	 * ignoring IXGBE_FEATURE_RSS — intentional per the comment above.
	 */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		/* j cycles round-robin over the configured queues. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Accumulate 4 entries per 32-bit RETA register.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Register is full — flush it. */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate requested hash types into VFMRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1708
1709
1710 /************************************************************************
1711 * ixv_initialize_receive_units - Setup receive registers and features.
1712 ************************************************************************/
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 *   For each RX ring: disable the queue, program the descriptor ring
 *   base/length, reset head/tail, configure SRRCTL buffer sizes, then
 *   re-enable the queue and set the tail.  Also programs split-receive
 *   packet types, tells the PF our max frame size, sets up RSS, and
 *   configures RX checksum offload.
 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Receive buffer size: 4K for jumbo MTU, 2K otherwise. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 meaning is unnamed here — check datasheet. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to ~10ms) for the disable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll (up to ~10ms) until the queue reports enabled. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
} /* ixv_initialize_receive_units */
1840
1841 /************************************************************************
1842 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1843 *
1844 * Retrieves the TDH value from the hardware
1845 ************************************************************************/
1846 static int
1847 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1848 {
1849 struct sysctlnode node = *rnode;
1850 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1851 uint32_t val;
1852
1853 if (!txr)
1854 return (0);
1855
1856 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1857 node.sysctl_data = &val;
1858 return sysctl_lookup(SYSCTLFN_CALL(&node));
1859 } /* ixv_sysctl_tdh_handler */
1860
1861 /************************************************************************
1862 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1863 *
1864 * Retrieves the TDT value from the hardware
1865 ************************************************************************/
1866 static int
1867 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1868 {
1869 struct sysctlnode node = *rnode;
1870 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1871 uint32_t val;
1872
1873 if (!txr)
1874 return (0);
1875
1876 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1877 node.sysctl_data = &val;
1878 return sysctl_lookup(SYSCTLFN_CALL(&node));
1879 } /* ixv_sysctl_tdt_handler */
1880
1881 /************************************************************************
1882 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1883 *
1884 * Retrieves the RDH value from the hardware
1885 ************************************************************************/
1886 static int
1887 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1888 {
1889 struct sysctlnode node = *rnode;
1890 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1891 uint32_t val;
1892
1893 if (!rxr)
1894 return (0);
1895
1896 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1897 node.sysctl_data = &val;
1898 return sysctl_lookup(SYSCTLFN_CALL(&node));
1899 } /* ixv_sysctl_rdh_handler */
1900
1901 /************************************************************************
1902 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1903 *
1904 * Retrieves the RDT value from the hardware
1905 ************************************************************************/
1906 static int
1907 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1908 {
1909 struct sysctlnode node = *rnode;
1910 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1911 uint32_t val;
1912
1913 if (!rxr)
1914 return (0);
1915
1916 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1917 node.sysctl_data = &val;
1918 return sysctl_lookup(SYSCTLFN_CALL(&node));
1919 } /* ixv_sysctl_rdt_handler */
1920
1921 /************************************************************************
1922 * ixv_setup_vlan_support
1923 ************************************************************************/
/************************************************************************
 * ixv_setup_vlan_support
 *
 *   Re-enable VLAN tag stripping on every RX queue and repopulate the
 *   VFTA (via the PF mailbox) after a soft reset cleared it.  Does
 *   nothing if no VLANs are attached.
 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* Turn on hardware VLAN tag stripping for this queue. */
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; retry a
			 * few times, then give up on this VID silently.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1985
1986 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1987 /************************************************************************
1988 * ixv_register_vlan
1989 *
1990 * Run via a vlan config EVENT, it enables us to use the
1991 * HW Filter table since we can get the vlan id. This just
1992 * creates the entry in the soft version of the VFTA, init
1993 * will repopulate the real table.
1994 ************************************************************************/
1995 static void
1996 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1997 {
1998 struct adapter *adapter = ifp->if_softc;
1999 u16 index, bit;
2000
2001 if (ifp->if_softc != arg) /* Not our event */
2002 return;
2003
2004 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2005 return;
2006
2007 IXGBE_CORE_LOCK(adapter);
2008 index = (vtag >> 5) & 0x7F;
2009 bit = vtag & 0x1F;
2010 adapter->shadow_vfta[index] |= (1 << bit);
2011 /* Re-init to load the changes */
2012 ixv_init_locked(adapter);
2013 IXGBE_CORE_UNLOCK(adapter);
2014 } /* ixv_register_vlan */
2015
2016 /************************************************************************
2017 * ixv_unregister_vlan
2018 *
2019 * Run via a vlan unconfig EVENT, remove our entry
2020 * in the soft vfta.
2021 ************************************************************************/
2022 static void
2023 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2024 {
2025 struct adapter *adapter = ifp->if_softc;
2026 u16 index, bit;
2027
2028 if (ifp->if_softc != arg)
2029 return;
2030
2031 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2032 return;
2033
2034 IXGBE_CORE_LOCK(adapter);
2035 index = (vtag >> 5) & 0x7F;
2036 bit = vtag & 0x1F;
2037 adapter->shadow_vfta[index] &= ~(1 << bit);
2038 /* Re-init to load the changes */
2039 ixv_init_locked(adapter);
2040 IXGBE_CORE_UNLOCK(adapter);
2041 } /* ixv_unregister_vlan */
2042 #endif
2043
2044 /************************************************************************
2045 * ixv_enable_intr
2046 ************************************************************************/
2047 static void
2048 ixv_enable_intr(struct adapter *adapter)
2049 {
2050 struct ixgbe_hw *hw = &adapter->hw;
2051 struct ix_queue *que = adapter->queues;
2052 u32 mask;
2053 int i;
2054
2055 /* For VTEIAC */
2056 mask = (1 << adapter->vector);
2057 for (i = 0; i < adapter->num_queues; i++, que++)
2058 mask |= (1 << que->msix);
2059 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2060
2061 /* For VTEIMS */
2062 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2063 que = adapter->queues;
2064 for (i = 0; i < adapter->num_queues; i++, que++)
2065 ixv_enable_queue(adapter, que->msix);
2066
2067 IXGBE_WRITE_FLUSH(hw);
2068 } /* ixv_enable_intr */
2069
2070 /************************************************************************
2071 * ixv_disable_intr
2072 ************************************************************************/
2073 static void
2074 ixv_disable_intr(struct adapter *adapter)
2075 {
2076 struct ix_queue *que = adapter->queues;
2077
2078 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2079
2080 /* disable interrupts other than queues */
2081 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2082
2083 for (int i = 0; i < adapter->num_queues; i++, que++)
2084 ixv_disable_queue(adapter, que->msix);
2085
2086 IXGBE_WRITE_FLUSH(&adapter->hw);
2087 } /* ixv_disable_intr */
2088
2089 /************************************************************************
2090 * ixv_set_ivar
2091 *
2092 * Setup the correct IVAR register for a particular MSI-X interrupt
2093 * - entry is the register array entry
2094 * - vector is the MSI-X vector for this queue
2095 * - type is RX/TX/MISC
2096 ************************************************************************/
2097 static void
2098 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2099 {
2100 struct ixgbe_hw *hw = &adapter->hw;
2101 u32 ivar, index;
2102
2103 vector |= IXGBE_IVAR_ALLOC_VAL;
2104
2105 if (type == -1) { /* MISC IVAR */
2106 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2107 ivar &= ~0xFF;
2108 ivar |= vector;
2109 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2110 } else { /* RX/TX IVARS */
2111 index = (16 * (entry & 1)) + (8 * type);
2112 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2113 ivar &= ~(0xFF << index);
2114 ivar |= (vector << index);
2115 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2116 }
2117 } /* ixv_set_ivar */
2118
2119 /************************************************************************
2120 * ixv_configure_ivars
2121 ************************************************************************/
2122 static void
2123 ixv_configure_ivars(struct adapter *adapter)
2124 {
2125 struct ix_queue *que = adapter->queues;
2126
2127 /* XXX We should sync EITR value calculation with ixgbe.c? */
2128
2129 for (int i = 0; i < adapter->num_queues; i++, que++) {
2130 /* First the RX queue entry */
2131 ixv_set_ivar(adapter, i, que->msix, 0);
2132 /* ... and the TX */
2133 ixv_set_ivar(adapter, i, que->msix, 1);
2134 /* Set an initial value in EITR */
2135 ixv_eitr_write(que, IXGBE_EITR_DEFAULT);
2136 }
2137
2138 /* For the mailbox interrupt */
2139 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2140 } /* ixv_configure_ivars */
2141
2142
2143 /************************************************************************
2144 * ixv_save_stats
2145 *
2146 * The VF stats registers never have a truly virgin
2147 * starting point, so this routine tries to make an
2148 * artificial one, marking ground zero on attach as
2149 * it were.
2150 ************************************************************************/
2151 static void
2152 ixv_save_stats(struct adapter *adapter)
2153 {
2154 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2155
2156 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2157 stats->saved_reset_vfgprc +=
2158 stats->vfgprc.ev_count - stats->base_vfgprc;
2159 stats->saved_reset_vfgptc +=
2160 stats->vfgptc.ev_count - stats->base_vfgptc;
2161 stats->saved_reset_vfgorc +=
2162 stats->vfgorc.ev_count - stats->base_vfgorc;
2163 stats->saved_reset_vfgotc +=
2164 stats->vfgotc.ev_count - stats->base_vfgotc;
2165 stats->saved_reset_vfmprc +=
2166 stats->vfmprc.ev_count - stats->base_vfmprc;
2167 }
2168 } /* ixv_save_stats */
2169
2170 /************************************************************************
2171 * ixv_init_stats
2172 ************************************************************************/
2173 static void
2174 ixv_init_stats(struct adapter *adapter)
2175 {
2176 struct ixgbe_hw *hw = &adapter->hw;
2177
2178 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2179 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2180 adapter->stats.vf.last_vfgorc |=
2181 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2182
2183 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2184 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2185 adapter->stats.vf.last_vfgotc |=
2186 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2187
2188 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2189
2190 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2191 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2192 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2193 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2194 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2195 } /* ixv_init_stats */
2196
/*
 * UPDATE_STAT_32(reg, last, count):
 *   Fold the current value of a 32-bit VF statistics register into a
 *   64-bit evcnt.  A read smaller than the previous one is taken as a
 *   hardware wrap, so 2^32 is carried into the high half before the new
 *   low 32 bits are spliced in.  Expects 'hw' to be in scope.
 */
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, (reg));	\
	if (current < (last))				\
		count.ev_count += 0x100000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFFF00000000LL;		\
	count.ev_count |= current;			\
}
2206
/*
 * UPDATE_STAT_36(lsb, msb, last, count):
 *   Like UPDATE_STAT_32 but for a 36-bit counter split across an
 *   LSB/MSB register pair; the wrap carry and mask use 2^36.
 *   Expects 'hw' to be in scope.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < (last))				\
		count.ev_count += 0x1000000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFF000000000LL;		\
	count.ev_count |= current;			\
}
2218
2219 /************************************************************************
2220 * ixv_update_stats - Update the board statistics counters.
2221 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Packet counters are 32-bit, octet counters 36-bit (LSB/MSB). */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2243
2244 /************************************************************************
2245 * ixv_sysctl_interrupt_rate_handler
2246 ************************************************************************/
2247 static int
2248 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2249 {
2250 struct sysctlnode node = *rnode;
2251 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2252 struct adapter *adapter = que->adapter;
2253 uint32_t reg, usec, rate;
2254 int error;
2255
2256 if (que == NULL)
2257 return 0;
2258 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2259 usec = ((reg & 0x0FF8) >> 3);
2260 if (usec > 0)
2261 rate = 500000 / usec;
2262 else
2263 rate = 0;
2264 node.sysctl_data = &rate;
2265 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2266 if (error || newp == NULL)
2267 return error;
2268 reg &= ~0xfff; /* default, no limitation */
2269 if (rate > 0 && rate < 500000) {
2270 if (rate < 1000)
2271 rate = 1000;
2272 reg |= ((4000000/rate) & 0xff8);
2273 /*
2274 * When RSC is used, ITR interval must be larger than
2275 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2276 * The minimum value is always greater than 2us on 100M
2277 * (and 10M?(not documented)), but it's not on 1G and higher.
2278 */
2279 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2280 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2281 if ((adapter->num_queues > 1)
2282 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2283 return EINVAL;
2284 }
2285 ixv_max_interrupt_rate = rate;
2286 } else
2287 ixv_max_interrupt_rate = 0;
2288 ixv_eitr_write(que, reg);
2289
2290 return (0);
2291 } /* ixv_sysctl_interrupt_rate_handler */
2292
2293 const struct sysctlnode *
2294 ixv_sysctl_instance(struct adapter *adapter)
2295 {
2296 const char *dvname;
2297 struct sysctllog **log;
2298 int rc;
2299 const struct sysctlnode *rnode;
2300
2301 log = &adapter->sysctllog;
2302 dvname = device_xname(adapter->dev);
2303
2304 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2305 0, CTLTYPE_NODE, dvname,
2306 SYSCTL_DESCR("ixv information and settings"),
2307 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2308 goto err;
2309
2310 return rnode;
2311 err:
2312 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2313 return NULL;
2314 }
2315
/* Attach the per-device (non-statistics) sysctl knobs. */
static void
ixv_add_device_sysctls(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* "debug": write 1 to dump driver state via ixv_sysctl_debug() */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* "enable_aim": toggle adaptive interrupt moderation */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* "txrx_workqueue": defer packet processing to a workqueue */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
2349
2350 /************************************************************************
2351 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2352 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue sysctls and event counters. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/*
		 * NOTE(review): on any sysctl_createv() failure the loop
		 * breaks, leaving later queues without sysctls/evcnts.
		 */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue interrupt / softint event counters. */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	/* Checksum-offload counters. */
	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2526
2527 /************************************************************************
2528 * ixv_set_sysctl_value
2529 ************************************************************************/
2530 static void
2531 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2532 const char *description, int *limit, int value)
2533 {
2534 device_t dev = adapter->dev;
2535 struct sysctllog **log;
2536 const struct sysctlnode *rnode, *cnode;
2537
2538 log = &adapter->sysctllog;
2539 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2540 aprint_error_dev(dev, "could not create sysctl root\n");
2541 return;
2542 }
2543 if (sysctl_createv(log, 0, &rnode, &cnode,
2544 CTLFLAG_READWRITE, CTLTYPE_INT,
2545 name, SYSCTL_DESCR(description),
2546 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2547 aprint_error_dev(dev, "could not create sysctl\n");
2548 *limit = value;
2549 } /* ixv_set_sysctl_value */
2550
2551 /************************************************************************
2552 * ixv_print_debug_info
2553 *
2554 * Called only when em_display_debug_stats is enabled.
2555 * Provides a way to take a look at important statistics
2556 * maintained by the driver and hardware.
2557 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
#ifdef LRO
	struct lro_ctrl *lro;
#endif /* LRO */

	/* Hardware error-byte counter. */
	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Per-queue interrupt and traffic counters. */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail.ev_count);
	}

	/* Mailbox (link) vector counter. */
	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq.ev_count);
} /* ixv_print_debug_info */
2600
2601 /************************************************************************
2602 * ixv_sysctl_debug
2603 ************************************************************************/
2604 static int
2605 ixv_sysctl_debug(SYSCTLFN_ARGS)
2606 {
2607 struct sysctlnode node;
2608 struct adapter *adapter;
2609 int error, result;
2610
2611 node = *rnode;
2612 node.sysctl_data = &result;
2613 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2614
2615 if (error || newp == NULL)
2616 return error;
2617
2618 if (result == 1) {
2619 adapter = (struct adapter *)node.sysctl_data;
2620 ixv_print_debug_info(adapter);
2621 }
2622
2623 return 0;
2624 } /* ixv_sysctl_debug */
2625
2626 /************************************************************************
2627 * ixv_init_device_features
2628 ************************************************************************/
2629 static void
2630 ixv_init_device_features(struct adapter *adapter)
2631 {
2632 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2633 | IXGBE_FEATURE_VF
2634 | IXGBE_FEATURE_RSS
2635 | IXGBE_FEATURE_LEGACY_TX;
2636
2637 /* A tad short on feature flags for VFs, atm. */
2638 switch (adapter->hw.mac.type) {
2639 case ixgbe_mac_82599_vf:
2640 break;
2641 case ixgbe_mac_X540_vf:
2642 break;
2643 case ixgbe_mac_X550_vf:
2644 case ixgbe_mac_X550EM_x_vf:
2645 case ixgbe_mac_X550EM_a_vf:
2646 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2647 break;
2648 default:
2649 break;
2650 }
2651
2652 /* Enabled by default... */
2653 /* Is a virtual function (VF) */
2654 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2655 adapter->feat_en |= IXGBE_FEATURE_VF;
2656 /* Netmap */
2657 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2658 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2659 /* Receive-Side Scaling (RSS) */
2660 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2661 adapter->feat_en |= IXGBE_FEATURE_RSS;
2662 /* Needs advanced context descriptor regardless of offloads req'd */
2663 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2664 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2665
2666 /* Enabled via sysctl... */
2667 /* Legacy (single queue) transmit */
2668 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2669 ixv_enable_legacy_tx)
2670 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2671 } /* ixv_init_device_features */
2672
2673 /************************************************************************
2674 * ixv_shutdown - Shutdown entry point
2675 ************************************************************************/
2676 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2677 static int
2678 ixv_shutdown(device_t dev)
2679 {
2680 struct adapter *adapter = device_private(dev);
2681 IXGBE_CORE_LOCK(adapter);
2682 ixv_stop(adapter);
2683 IXGBE_CORE_UNLOCK(adapter);
2684
2685 return (0);
2686 } /* ixv_shutdown */
2687 #endif
2688
2689 static int
2690 ixv_ifflags_cb(struct ethercom *ec)
2691 {
2692 struct ifnet *ifp = &ec->ec_if;
2693 struct adapter *adapter = ifp->if_softc;
2694 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2695
2696 IXGBE_CORE_LOCK(adapter);
2697
2698 if (change != 0)
2699 adapter->if_flags = ifp->if_flags;
2700
2701 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2702 rc = ENETRESET;
2703
2704 /* Set up VLAN support and filter */
2705 ixv_setup_vlan_support(adapter);
2706
2707 IXGBE_CORE_UNLOCK(adapter);
2708
2709 return rc;
2710 }
2711
2712
2713 /************************************************************************
2714 * ixv_ioctl - Ioctl entry point
2715 *
2716 * Called when the user wants to configure the interface.
2717 *
2718 * return 0 on success, positive on failure
2719 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities that must be toggled as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	    IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only, no side effects. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common Ethernet code do the bookkeeping first;
		 * ENETRESET means we must reprogram the hardware. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* Interface down: nothing to reprogram (empty statement
		 * is deliberate — the else-if chain handles running state). */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU change requires a full re-init. */
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2793
2794 /************************************************************************
2795 * ixv_init
2796 ************************************************************************/
2797 static int
2798 ixv_init(struct ifnet *ifp)
2799 {
2800 struct adapter *adapter = ifp->if_softc;
2801
2802 IXGBE_CORE_LOCK(adapter);
2803 ixv_init_locked(adapter);
2804 IXGBE_CORE_UNLOCK(adapter);
2805
2806 return 0;
2807 } /* ixv_init */
2808
2809 /************************************************************************
2810 * ixv_handle_que
2811 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	/* Count every softint/workqueue invocation for this queue. */
	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Drain completed Rx, then Tx; 'more' = work left over. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue Tx: push anything queued on this ring's pcq. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* Work remains: requeue ourselves instead of
			 * re-enabling the interrupt. */
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2856
2857 /************************************************************************
2858 * ixv_handle_que_work
2859 ************************************************************************/
2860 static void
2861 ixv_handle_que_work(struct work *wk, void *context)
2862 {
2863 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2864
2865 /*
2866 * "enqueued flag" is not required here the same as ixg(4).
2867 * See ixgbe_msix_que().
2868 */
2869 ixv_handle_que(que);
2870 }
2871
2872 /************************************************************************
2873 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2874 ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, msix_ctrl, rid, vector = 0;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	char intrbuf[PCI_INTRSTR_LEN];
	char wqname[MAXCOMLEN];
	char intr_xname[32];
	const char *intrstr = NULL;
	kcpuset_t *affinity;
	int cpu_id = 0;

	pc = adapter->osdep.pc;
	tag = adapter->osdep.tag;

	/* One MSI-X vector per queue pair plus one for the mailbox. */
	adapter->osdep.nintrs = adapter->num_queues + 1;
	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
	    adapter->osdep.nintrs) != 0) {
		aprint_error_dev(dev,
		    "failed to allocate MSI-X interrupt\n");
		return (ENXIO);
	}

	kcpuset_create(&affinity, false);
	/* Establish one TX/RX interrupt handler per queue. */
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
		    device_xname(dev), i);
		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
		    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
		/* Allow the handler to run without the kernel lock. */
		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
		    true);
#endif
		/* Set the handler function */
		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
		    intr_xname);
		if (que->res == NULL) {
			/* Undo the whole allocation; partial setup is not kept. */
			pci_intr_release(pc, adapter->osdep.intrs,
			    adapter->osdep.nintrs);
			aprint_error_dev(dev,
			    "Failed to register QUE handler\n");
			kcpuset_destroy(affinity);
			return (ENXIO);
		}
		que->msix = vector;
		/*
		 * Record this queue's vector in the active mask.
		 * NOTE(review): the shift is computed in int before the
		 * u64 cast, so vectors >= 31 would overflow — presumably
		 * fine for a VF (few queues), but worth confirming.
		 */
		adapter->active_queues |= (u64)(1 << que->msix);

		cpu_id = i;
		/* Round-robin affinity */
		kcpuset_zero(affinity);
		kcpuset_set(affinity, cpu_id % ncpu);
		/* Affinity is best-effort; failure only changes the message. */
		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
		    NULL);
		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
		    intrstr);
		if (error == 0)
			aprint_normal(", bound queue %d to cpu %d\n",
			    i, cpu_id % ncpu);
		else
			aprint_normal("\n");

#ifndef IXGBE_LEGACY_TX
		/* Soft interrupt for deferred multiqueue TX start. */
		txr->txr_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);
#endif
		/* Soft interrupt for deferred queue (RX/TX clean) work. */
		que->que_si
		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixv_handle_que, que);
		if (que->que_si == NULL) {
			/* NOTE(review): reported but not treated as fatal. */
			aprint_error_dev(dev,
			    "could not establish software interrupt\n");
		}
	}
	/* Workqueue used when deferred TX runs in thread context. */
	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
	error = workqueue_create(&adapter->txr_wq, wqname,
	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		/* NOTE(review): creation failure is reported, not fatal. */
		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
	}
	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));

	/* Workqueue used when queue handling runs in thread context. */
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);
	if (error) {
		aprint_error_dev(dev,
		    "couldn't create workqueue\n");
	}

	/* and Mailbox */
	cpu_id++;
	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
	adapter->vector = vector;
	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
	    sizeof(intrbuf));
#ifdef IXGBE_MPSAFE
	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
	    true);
#endif
	/* Set the mbx handler function */
	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
	    intr_xname);
	if (adapter->osdep.ihs[vector] == NULL) {
		/*
		 * NOTE(review): unlike the queue-handler failure path above,
		 * this path does not release the MSI-X vectors — verify
		 * whether the caller cleans up on error.
		 */
		adapter->res = NULL;
		aprint_error_dev(dev, "Failed to register LINK handler\n");
		kcpuset_destroy(affinity);
		return (ENXIO);
	}
	/* Round-robin affinity */
	kcpuset_zero(affinity);
	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);

	aprint_normal_dev(dev,
	    "for link, interrupting at %s", intrstr);
	if (error == 0)
		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
	else
		aprint_normal("\n");

	/* Tasklets for Mailbox */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixv_handle_link, adapter);
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
		rid += PCI_MSIX_CTL;
		msix_ctrl = pci_conf_read(pc, tag, rid);
		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
		pci_conf_write(pc, tag, rid, msix_ctrl);
	}

	kcpuset_destroy(affinity);
	return (0);
} /* ixv_allocate_msix */
3025
3026 /************************************************************************
3027 * ixv_configure_interrupts - Setup MSI-X resources
3028 *
3029 * Note: The VF device MUST use MSI-X, there is no fallback.
3030 ************************************************************************/
3031 static int
3032 ixv_configure_interrupts(struct adapter *adapter)
3033 {
3034 device_t dev = adapter->dev;
3035 int want, queues, msgs;
3036
3037 /* Must have at least 2 MSI-X vectors */
3038 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3039 if (msgs < 2) {
3040 aprint_error_dev(dev, "MSIX config error\n");
3041 return (ENXIO);
3042 }
3043 msgs = MIN(msgs, IXG_MAX_NINTR);
3044
3045 /* Figure out a reasonable auto config value */
3046 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3047
3048 if (ixv_num_queues != 0)
3049 queues = ixv_num_queues;
3050 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3051 queues = IXGBE_VF_MAX_TX_QUEUES;
3052
3053 /*
3054 * Want vectors for the queues,
3055 * plus an additional for mailbox.
3056 */
3057 want = queues + 1;
3058 if (msgs >= want)
3059 msgs = want;
3060 else {
3061 aprint_error_dev(dev,
3062 "MSI-X Configuration Problem, "
3063 "%d vectors but %d queues wanted!\n",
3064 msgs, want);
3065 return -1;
3066 }
3067
3068 adapter->msix_mem = (void *)1; /* XXX */
3069 aprint_normal_dev(dev,
3070 "Using MSI-X interrupts with %d vectors\n", msgs);
3071 adapter->num_queues = queues;
3072
3073 return (0);
3074 } /* ixv_configure_interrupts */
3075
3076
3077 /************************************************************************
3078 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3079 *
3080 * Done outside of interrupt context since the driver might sleep
3081 ************************************************************************/
3082 static void
3083 ixv_handle_link(void *context)
3084 {
3085 struct adapter *adapter = context;
3086
3087 IXGBE_CORE_LOCK(adapter);
3088
3089 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3090 &adapter->link_up, FALSE);
3091 ixv_update_link_status(adapter);
3092
3093 IXGBE_CORE_UNLOCK(adapter);
3094 } /* ixv_handle_link */
3095
3096 /************************************************************************
3097 * ixv_check_link - Used in the local timer to poll for link changes
3098 ************************************************************************/
3099 static void
3100 ixv_check_link(struct adapter *adapter)
3101 {
3102
3103 KASSERT(mutex_owned(&adapter->core_mtx));
3104
3105 adapter->hw.mac.get_link_status = TRUE;
3106
3107 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3108 &adapter->link_up, FALSE);
3109 ixv_update_link_status(adapter);
3110 } /* ixv_check_link */
3111