ixv.c revision 1.85 1 /*$NetBSD: ixv.c,v 1.85 2018/03/07 03:29:10 msaitoh Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   Sub-IDs of 0 act as wildcards in ixv_lookup(); every entry below
 *   therefore matches any subsystem.
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixv_vendor_info_array entries (all 0
 *   above, so every device prints the same string).
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* Autoconfiguration entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
/* ifnet / ethercom callbacks */
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
/* NOTE: takes void * (not struct adapter *) so it can be stored in
 * adapter->stop_locked; see ixv_attach(). */
static void	ixv_stop(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
/* PCI / interrupt resource management */
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
/* Watchdog timer */
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);
static int	ixv_setup_interface(device_t, struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

/* Hardware init helpers */
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static void	ixv_check_link(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct ix_queue *, uint32_t);

static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);


/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *);
static void	ixv_handle_link(void *);

/* Workqueue handler for deferred work */
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
158
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *
 *   NetBSD autoconf glue: probe/attach/detach; DVF_DETACH_SHUTDOWN
 *   allows detaching at shutdown time.
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus registration, kept for reference only. */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * NetBSD has no FreeBSD-style boot-time tunables; TUNABLE_INT is
 * deliberately defined to expand to nothing so the invocations below
 * are no-ops and the defaults above are used.
 */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * MP-safe flag sets for callouts, softints, and workqueues.
 * (IXGBE_SOFTINFT_FLAGS is a historical misspelling of "SOFTINT";
 * kept as-is because other ixgbe files reference it.)
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
241
242 /************************************************************************
243 * ixv_probe - Device identification routine
244 *
245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID.
247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/
250 static int
251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
252 {
253 #ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux;
255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257 #else
258 return 0;
259 #endif
260 } /* ixv_probe */
261
262 static ixgbe_vendor_info_t *
263 ixv_lookup(const struct pci_attach_args *pa)
264 {
265 ixgbe_vendor_info_t *ent;
266 pcireg_t subid;
267
268 INIT_DEBUGOUT("ixv_lookup: begin");
269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL;
272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) {
282 return ent;
283 }
284 }
285
286 return NULL;
287 }
288
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure this function cleans up whatever it has allocated so
 *   far (err_late/err_out labels) and returns with
 *   adapter->osdep.attached left false, which makes ixv_detach() a
 *   no-op for a failed attach.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Function pointers used by common ixgbe code */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Probe already matched, so the table lookup cannot fail */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/*
	 * Negotiate mailbox API version with the PF.  A failure here is
	 * not fatal; we fall back to whatever hw->api_version ends up as.
	 */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/*
	 * If no mac address was assigned (PF did not provide one), make
	 * a random locally-administered unicast one.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear multicast bit */
		addr[0] |= 0x02;	/* set locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: counts must keep the
	 * descriptor rings DBA_ALIGN-aligned and within MIN/MAX bounds;
	 * otherwise fall back to the defaults.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the capability/enabled feature bitmaps */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Mark attach complete so ixv_detach() knows to tear down */
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
563
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure (EBUSY when VLANs are
 *   still configured and detach is not forced).
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Attach failed or never completed; nothing to tear down */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-processing softints */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* ...and the workqueue variants, if they were created */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* NULL cv/mutex: wait here until the callout has drained */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach the adapter-wide event counters (mirror of the
	 * evcnt_attach calls in ixv_add_stats_sysctls()) */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* txr was advanced by the softint loop above; rewind it */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->im_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
705
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the core lock (asserted below).  On setup
 *   failure the adapter is stopped again via ixv_stop().
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: the mailbox/link vector plus one bit per queue */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
830
831 /*
832 * MSI-X Interrupt Handlers and Tasklets
833 */
834
835 static inline void
836 ixv_enable_queue(struct adapter *adapter, u32 vector)
837 {
838 struct ixgbe_hw *hw = &adapter->hw;
839 struct ix_queue *que = &adapter->queues[vector];
840 u32 queue = 1 << vector;
841 u32 mask;
842
843 mutex_enter(&que->im_mtx);
844 if (que->im_nest > 0 && --que->im_nest > 0)
845 goto out;
846
847 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
848 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
849 out:
850 mutex_exit(&que->im_mtx);
851 } /* ixv_enable_queue */
852
853 static inline void
854 ixv_disable_queue(struct adapter *adapter, u32 vector)
855 {
856 struct ixgbe_hw *hw = &adapter->hw;
857 struct ix_queue *que = &adapter->queues[vector];
858 u64 queue = (u64)(1 << vector);
859 u32 mask;
860
861 mutex_enter(&que->im_mtx);
862 if (que->im_nest++ > 0)
863 goto out;
864
865 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
866 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
867 out:
868 mutex_exit(&que->im_mtx);
869 } /* ixv_disable_queue */
870
871 static inline void
872 ixv_rearm_queues(struct adapter *adapter, u64 queues)
873 {
874 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
875 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
876 } /* ixv_rearm_queues */
877
878
/************************************************************************
 * ixv_msix_que - MSI Queue Interrupt Service routine
 *
 *   Per-queue MSI-X handler: masks the queue interrupt, cleans the TX
 *   ring, optionally recomputes the adaptive interrupt-moderation
 *   (AIM) value, then either schedules the deferred-processing
 *   softint (RX work pending) or re-enables the queue interrupt.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until the deferred work has run */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average bytes per packet over the last interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more) {
		/* More work (RX): hand off to the softint; it will
		 * re-enable the interrupt when done. */
		que->req.ev_count++;
		softint_schedule(que->que_si);
	} else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
970
971 /************************************************************************
972 * ixv_msix_mbx
973 ************************************************************************/
974 static int
975 ixv_msix_mbx(void *arg)
976 {
977 struct adapter *adapter = arg;
978 struct ixgbe_hw *hw = &adapter->hw;
979
980 ++adapter->link_irq.ev_count;
981 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
982
983 /* Link status change */
984 hw->mac.get_link_status = TRUE;
985 softint_schedule(adapter->link_si);
986
987 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
988
989 return 1;
990 } /* ixv_msix_mbx */
991
/************************************************************************
 * ixv_eitr_write
 *
 * Program the interrupt-throttle register (VTEITR) for the MSI-X
 * vector of the given queue.
 ************************************************************************/
static void
ixv_eitr_write(struct ix_queue *que, uint32_t itr)
{
	struct adapter *adapter = que->adapter;

	/*
	 * Newer devices than 82598 have VF function, so this function is
	 * simple.
	 */
	/* Set the "counter write disable" (CNT_WDIS) bit in the value. */
	itr |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), itr);
}
1005
1006
1007 /************************************************************************
1008 * ixv_media_status - Media Ioctl callback
1009 *
1010 * Called whenever the user queries the status of
1011 * the interface using ifconfig.
1012 ************************************************************************/
1013 static void
1014 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1015 {
1016 struct adapter *adapter = ifp->if_softc;
1017
1018 INIT_DEBUGOUT("ixv_media_status: begin");
1019 IXGBE_CORE_LOCK(adapter);
1020 ixv_update_link_status(adapter);
1021
1022 ifmr->ifm_status = IFM_AVALID;
1023 ifmr->ifm_active = IFM_ETHER;
1024
1025 if (!adapter->link_active) {
1026 ifmr->ifm_active |= IFM_NONE;
1027 IXGBE_CORE_UNLOCK(adapter);
1028 return;
1029 }
1030
1031 ifmr->ifm_status |= IFM_ACTIVE;
1032
1033 switch (adapter->link_speed) {
1034 case IXGBE_LINK_SPEED_10GB_FULL:
1035 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1036 break;
1037 case IXGBE_LINK_SPEED_5GB_FULL:
1038 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1039 break;
1040 case IXGBE_LINK_SPEED_2_5GB_FULL:
1041 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1042 break;
1043 case IXGBE_LINK_SPEED_1GB_FULL:
1044 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1045 break;
1046 case IXGBE_LINK_SPEED_100_FULL:
1047 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1048 break;
1049 case IXGBE_LINK_SPEED_10_FULL:
1050 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1051 break;
1052 }
1053
1054 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1055
1056 IXGBE_CORE_UNLOCK(adapter);
1057
1058 return;
1059 } /* ixv_media_status */
1060
1061 /************************************************************************
1062 * ixv_media_change - Media Ioctl callback
1063 *
1064 * Called when the user changes speed/duplex using
1065 * media/mediopt option with ifconfig.
1066 ************************************************************************/
1067 static int
1068 ixv_media_change(struct ifnet *ifp)
1069 {
1070 struct adapter *adapter = ifp->if_softc;
1071 struct ifmedia *ifm = &adapter->media;
1072
1073 INIT_DEBUGOUT("ixv_media_change: begin");
1074
1075 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1076 return (EINVAL);
1077
1078 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1079 case IFM_AUTO:
1080 break;
1081 default:
1082 device_printf(adapter->dev, "Only auto media type\n");
1083 return (EINVAL);
1084 }
1085
1086 return (0);
1087 } /* ixv_media_change */
1088
1089
1090 /************************************************************************
1091 * ixv_negotiate_api
1092 *
1093 * Negotiate the Mailbox API with the PF;
1094 * start with the most featured API first.
1095 ************************************************************************/
1096 static int
1097 ixv_negotiate_api(struct adapter *adapter)
1098 {
1099 struct ixgbe_hw *hw = &adapter->hw;
1100 int mbx_api[] = { ixgbe_mbox_api_11,
1101 ixgbe_mbox_api_10,
1102 ixgbe_mbox_api_unknown };
1103 int i = 0;
1104
1105 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1106 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1107 return (0);
1108 i++;
1109 }
1110
1111 return (EINVAL);
1112 } /* ixv_negotiate_api */
1113
1114
/************************************************************************
 * ixv_set_multi - Multicast Update
 *
 * Called whenever multicast address list is updated.  Flattens the
 * interface's multicast list into a local array and hands it to the
 * shared code, which pushes it to the PF over the mailbox.
 *
 * Caller must hold the core lock.
 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	/* Flat array of 6-byte addresses, walked by ixv_mc_array_itr(). */
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * NOTE(review): only enm_addrlo is copied; multicast
		 * address ranges (addrlo != addrhi) are not expanded or
		 * detected here -- confirm whether allmulti should be
		 * forced in that case.
		 */
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		/* Stop before overflowing mta[]; excess entries are dropped. */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	update_ptr = mta;

	/* Shared code pulls addresses one-by-one via ixv_mc_array_itr(). */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);

	return;
} /* ixv_set_multi */
1154
1155 /************************************************************************
1156 * ixv_mc_array_itr
1157 *
1158 * An iterator function needed by the multicast shared code.
1159 * It feeds the shared code routine the addresses in the
1160 * array of ixv_set_multi() one by one.
1161 ************************************************************************/
1162 static u8 *
1163 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1164 {
1165 u8 *addr = *update_ptr;
1166 u8 *newptr;
1167 *vmdq = 0;
1168
1169 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1170 *update_ptr = newptr;
1171
1172 return addr;
1173 } /* ixv_mc_array_itr */
1174
/************************************************************************
 * ixv_local_timer - Timer routine
 *
 * Checks for link status, updates statistics,
 * and runs the watchdog check.
 *
 * Callout entry point: takes the core lock and dispatches to
 * ixv_local_timer_locked(), which does the actual work.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_local_timer_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1190
/*
 * Locked body of the 1 Hz timer: refresh link/stats, detect hung TX
 * queues, and either rearm busy queues or reset the whole interface
 * when every queue is hung.  Caller holds the core lock.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending TX work */
	int hung = 0;		/* number of queues declared hung this pass */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): this mixes que->busy and que->txr->busy;
		 * confirm against the txeof path which counter is
		 * actually incremented/latched.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-schedule ourselves one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log, mark down, and re-initialize the NIC. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1257
1258 /************************************************************************
1259 * ixv_update_link_status - Update OS on link state
1260 *
1261 * Note: Only updates the OS on the cached link state.
1262 * The real check of the hardware only happens with
1263 * a link interrupt.
1264 ************************************************************************/
1265 static void
1266 ixv_update_link_status(struct adapter *adapter)
1267 {
1268 struct ifnet *ifp = adapter->ifp;
1269 device_t dev = adapter->dev;
1270
1271 if (adapter->link_up) {
1272 if (adapter->link_active == FALSE) {
1273 if (bootverbose) {
1274 const char *bpsmsg;
1275
1276 switch (adapter->link_speed) {
1277 case IXGBE_LINK_SPEED_10GB_FULL:
1278 bpsmsg = "10 Gbps";
1279 break;
1280 case IXGBE_LINK_SPEED_5GB_FULL:
1281 bpsmsg = "5 Gbps";
1282 break;
1283 case IXGBE_LINK_SPEED_2_5GB_FULL:
1284 bpsmsg = "2.5 Gbps";
1285 break;
1286 case IXGBE_LINK_SPEED_1GB_FULL:
1287 bpsmsg = "1 Gbps";
1288 break;
1289 case IXGBE_LINK_SPEED_100_FULL:
1290 bpsmsg = "100 Mbps";
1291 break;
1292 case IXGBE_LINK_SPEED_10_FULL:
1293 bpsmsg = "10 Mbps";
1294 break;
1295 default:
1296 bpsmsg = "unknown speed";
1297 break;
1298 }
1299 device_printf(dev, "Link is up %s %s \n",
1300 bpsmsg, "Full Duplex");
1301 }
1302 adapter->link_active = TRUE;
1303 if_link_state_change(ifp, LINK_STATE_UP);
1304 }
1305 } else { /* Link down */
1306 if (adapter->link_active == TRUE) {
1307 if (bootverbose)
1308 device_printf(dev, "Link is Down\n");
1309 if_link_state_change(ifp, LINK_STATE_DOWN);
1310 adapter->link_active = FALSE;
1311 }
1312 }
1313
1314 return;
1315 } /* ixv_update_link_status */
1316
1317
/************************************************************************
 * ixv_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/

/* ifnet if_stop hook: wraps ixv_stop() with the core lock held. */
static void
ixv_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1333
/* Locked worker for ixv_stop: quiesce interrupts, reset and stop the MAC. */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling stop_adapter().
	 * NOTE(review): presumably stop_adapter() is a no-op when the
	 * flag is already set -- confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1361
1362
/************************************************************************
 * ixv_allocate_pci_resources
 *
 * Map BAR0 (the device register window) and record the tunable queue
 * count.  Returns 0 on success, ENXIO when the BAR cannot be mapped
 * or has an unexpected type.
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register space must not be mapped prefetchable: reads
		 * and writes have side effects.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* Shared failure path: mark unmapped and bail. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1406
/************************************************************************
 * ixv_free_pci_resources
 *
 * Tear down everything ixv_allocate_pci_resources() and the interrupt
 * setup established: queue interrupt handlers, the mailbox handler,
 * the MSI-X allocation, and the BAR0 mapping.
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 *  Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size == 0 means BAR0 was never mapped (see map_err path). */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1446
/************************************************************************
 * ixv_setup_interface
 *
 * Setup networking device structure and register an interface.
 * Returns 0 on success or the error from if_initialize().
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);	/* default; fixed up on link change */
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach order matters: if_initialize, ether_ifattach, if_register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Interface capabilities start disabled (user opts in). */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1538
1539
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 * Programs the per-queue TX registers (descriptor ring base/length,
 * head/tail, write-back threshold) and enables each TX queue.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writes. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1584
1585
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 * Program the VF RSS key, the 64-entry redirection table, and the
 * hash-type selection (VFMRQC).  Hash types the hardware cannot do
 * (the *_EX variants) are reported and skipped.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		/* j cycles round-robin over the configured queues. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Every 4th entry, flush the accumulated 32-bit word. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the requested hash types into VFMRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1682
1683
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 * Programs buffer sizing, packet-split type, per-queue ring registers
 * (with disable/enable polling), RSS mapping, and RX checksum bits.
 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size (encoded in SRRCTL units) by MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 presumably selects multi-queue RSS mode
	 * in VFPSRTYPE -- confirm against the 82599 datasheet. */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1815
1816 /************************************************************************
1817 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1818 *
1819 * Retrieves the TDH value from the hardware
1820 ************************************************************************/
1821 static int
1822 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1823 {
1824 struct sysctlnode node = *rnode;
1825 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1826 uint32_t val;
1827
1828 if (!txr)
1829 return (0);
1830
1831 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1832 node.sysctl_data = &val;
1833 return sysctl_lookup(SYSCTLFN_CALL(&node));
1834 } /* ixv_sysctl_tdh_handler */
1835
1836 /************************************************************************
1837 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1838 *
1839 * Retrieves the TDT value from the hardware
1840 ************************************************************************/
1841 static int
1842 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1843 {
1844 struct sysctlnode node = *rnode;
1845 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1846 uint32_t val;
1847
1848 if (!txr)
1849 return (0);
1850
1851 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1852 node.sysctl_data = &val;
1853 return sysctl_lookup(SYSCTLFN_CALL(&node));
1854 } /* ixv_sysctl_tdt_handler */
1855
1856 /************************************************************************
1857 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1858 *
1859 * Retrieves the RDH value from the hardware
1860 ************************************************************************/
1861 static int
1862 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1863 {
1864 struct sysctlnode node = *rnode;
1865 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1866 uint32_t val;
1867
1868 if (!rxr)
1869 return (0);
1870
1871 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1872 node.sysctl_data = &val;
1873 return sysctl_lookup(SYSCTLFN_CALL(&node));
1874 } /* ixv_sysctl_rdh_handler */
1875
1876 /************************************************************************
1877 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1878 *
1879 * Retrieves the RDT value from the hardware
1880 ************************************************************************/
1881 static int
1882 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1883 {
1884 struct sysctlnode node = *rnode;
1885 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1886 uint32_t val;
1887
1888 if (!rxr)
1889 return (0);
1890
1891 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1892 node.sysctl_data = &val;
1893 return sysctl_lookup(SYSCTLFN_CALL(&node));
1894 } /* ixv_sysctl_rdt_handler */
1895
/************************************************************************
 * ixv_setup_vlan_support
 *
 * Re-enable VLAN tag stripping on every RX queue and repopulate the
 * PF-side VLAN filter table from the shadow copy after a reset.
 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* Turn on hardware VLAN tag stripping (VME). */
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Mailbox may be busy; retry up to 5 times. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1960
#if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
/************************************************************************
 * ixv_register_vlan
 *
 * Run via a vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This just
 * creates the entry in the soft version of the VFTA, init
 * will repopulate the real table.
 *
 * (Currently compiled out: NetBSD has no such event hooks yet.)
 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* shadow_vfta is a bitmap: 32 VLAN ids per array word. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */

/************************************************************************
 * ixv_unregister_vlan
 *
 * Run via a vlan unconfig EVENT, remove our entry
 * in the soft vfta.
 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the corresponding bit in the shadow bitmap. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
#endif
2018
2019 /************************************************************************
2020 * ixv_enable_intr
2021 ************************************************************************/
2022 static void
2023 ixv_enable_intr(struct adapter *adapter)
2024 {
2025 struct ixgbe_hw *hw = &adapter->hw;
2026 struct ix_queue *que = adapter->queues;
2027 u32 mask;
2028 int i;
2029
2030 /* For VTEIAC */
2031 mask = (1 << adapter->vector);
2032 for (i = 0; i < adapter->num_queues; i++, que++)
2033 mask |= (1 << que->msix);
2034 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2035
2036 /* For VTEIMS */
2037 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2038 que = adapter->queues;
2039 for (i = 0; i < adapter->num_queues; i++, que++)
2040 ixv_enable_queue(adapter, que->msix);
2041
2042 IXGBE_WRITE_FLUSH(hw);
2043
2044 return;
2045 } /* ixv_enable_intr */
2046
2047 /************************************************************************
2048 * ixv_disable_intr
2049 ************************************************************************/
2050 static void
2051 ixv_disable_intr(struct adapter *adapter)
2052 {
2053 struct ix_queue *que = adapter->queues;
2054
2055 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2056
2057 /* disable interrupts other than queues */
2058 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2059
2060 for (int i = 0; i < adapter->num_queues; i++, que++)
2061 ixv_disable_queue(adapter, que->msix);
2062
2063 IXGBE_WRITE_FLUSH(&adapter->hw);
2064
2065 return;
2066 } /* ixv_disable_intr */
2067
2068 /************************************************************************
2069 * ixv_set_ivar
2070 *
2071 * Setup the correct IVAR register for a particular MSI-X interrupt
2072 * - entry is the register array entry
2073 * - vector is the MSI-X vector for this queue
2074 * - type is RX/TX/MISC
2075 ************************************************************************/
2076 static void
2077 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2078 {
2079 struct ixgbe_hw *hw = &adapter->hw;
2080 u32 ivar, index;
2081
2082 vector |= IXGBE_IVAR_ALLOC_VAL;
2083
2084 if (type == -1) { /* MISC IVAR */
2085 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2086 ivar &= ~0xFF;
2087 ivar |= vector;
2088 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2089 } else { /* RX/TX IVARS */
2090 index = (16 * (entry & 1)) + (8 * type);
2091 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2092 ivar &= ~(0xFF << index);
2093 ivar |= (vector << index);
2094 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2095 }
2096 } /* ixv_set_ivar */
2097
2098 /************************************************************************
2099 * ixv_configure_ivars
2100 ************************************************************************/
2101 static void
2102 ixv_configure_ivars(struct adapter *adapter)
2103 {
2104 struct ix_queue *que = adapter->queues;
2105
2106 /* XXX We should sync EITR value calculation with ixgbe.c? */
2107
2108 for (int i = 0; i < adapter->num_queues; i++, que++) {
2109 /* First the RX queue entry */
2110 ixv_set_ivar(adapter, i, que->msix, 0);
2111 /* ... and the TX */
2112 ixv_set_ivar(adapter, i, que->msix, 1);
2113 /* Set an initial value in EITR */
2114 ixv_eitr_write(que, IXGBE_EITR_DEFAULT);
2115 }
2116
2117 /* For the mailbox interrupt */
2118 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2119 } /* ixv_configure_ivars */
2120
2121
2122 /************************************************************************
2123 * ixv_save_stats
2124 *
2125 * The VF stats registers never have a truly virgin
2126 * starting point, so this routine tries to make an
2127 * artificial one, marking ground zero on attach as
2128 * it were.
2129 ************************************************************************/
2130 static void
2131 ixv_save_stats(struct adapter *adapter)
2132 {
2133 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2134
2135 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2136 stats->saved_reset_vfgprc +=
2137 stats->vfgprc.ev_count - stats->base_vfgprc;
2138 stats->saved_reset_vfgptc +=
2139 stats->vfgptc.ev_count - stats->base_vfgptc;
2140 stats->saved_reset_vfgorc +=
2141 stats->vfgorc.ev_count - stats->base_vfgorc;
2142 stats->saved_reset_vfgotc +=
2143 stats->vfgotc.ev_count - stats->base_vfgotc;
2144 stats->saved_reset_vfmprc +=
2145 stats->vfmprc.ev_count - stats->base_vfmprc;
2146 }
2147 } /* ixv_save_stats */
2148
2149 /************************************************************************
2150 * ixv_init_stats
2151 ************************************************************************/
2152 static void
2153 ixv_init_stats(struct adapter *adapter)
2154 {
2155 struct ixgbe_hw *hw = &adapter->hw;
2156
2157 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2158 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2159 adapter->stats.vf.last_vfgorc |=
2160 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2161
2162 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2163 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2164 adapter->stats.vf.last_vfgotc |=
2165 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2166
2167 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2168
2169 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2170 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2171 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2172 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2173 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2174 } /* ixv_init_stats */
2175
/*
 * UPDATE_STAT_32 - fold a 32-bit HW counter into a 64-bit evcnt,
 * detecting one wrap since the previous read.  Relies on a local
 * "hw" (struct ixgbe_hw *) being in scope at the expansion site.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		count.ev_count += 0x100000000LL;			\
	(last) = current;						\
	count.ev_count &= 0xFFFFFFFF00000000LL;				\
	count.ev_count |= current;					\
} while (/*CONSTCOND*/0)
2185
/*
 * UPDATE_STAT_36 - same as UPDATE_STAT_32 but for the 36-bit octet
 * counters that are split across an LSB/MSB register pair.  Also
 * relies on "hw" at the expansion site; do { } while (0) makes it a
 * single statement.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		count.ev_count += 0x1000000000LL;			\
	(last) = current;						\
	count.ev_count &= 0xFFFFFFF000000000LL;				\
	count.ev_count |= current;					\
} while (/*CONSTCOND*/0)
2197
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	/* "hw" is referenced implicitly by the UPDATE_STAT_* macros */
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters, split across LSB/MSB registers */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2222
2223 /************************************************************************
2224 * ixv_sysctl_interrupt_rate_handler
2225 ************************************************************************/
2226 static int
2227 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2228 {
2229 struct sysctlnode node = *rnode;
2230 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2231 struct adapter *adapter = que->adapter;
2232 uint32_t reg, usec, rate;
2233 int error;
2234
2235 if (que == NULL)
2236 return 0;
2237 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2238 usec = ((reg & 0x0FF8) >> 3);
2239 if (usec > 0)
2240 rate = 500000 / usec;
2241 else
2242 rate = 0;
2243 node.sysctl_data = &rate;
2244 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2245 if (error || newp == NULL)
2246 return error;
2247 reg &= ~0xfff; /* default, no limitation */
2248 if (rate > 0 && rate < 500000) {
2249 if (rate < 1000)
2250 rate = 1000;
2251 reg |= ((4000000/rate) & 0xff8);
2252 /*
2253 * When RSC is used, ITR interval must be larger than
2254 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2255 * The minimum value is always greater than 2us on 100M
2256 * (and 10M?(not documented)), but it's not on 1G and higher.
2257 */
2258 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2259 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2260 if ((adapter->num_queues > 1)
2261 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2262 return EINVAL;
2263 }
2264 ixv_max_interrupt_rate = rate;
2265 } else
2266 ixv_max_interrupt_rate = 0;
2267 ixv_eitr_write(que, reg);
2268
2269 return (0);
2270 } /* ixv_sysctl_interrupt_rate_handler */
2271
2272 const struct sysctlnode *
2273 ixv_sysctl_instance(struct adapter *adapter)
2274 {
2275 const char *dvname;
2276 struct sysctllog **log;
2277 int rc;
2278 const struct sysctlnode *rnode;
2279
2280 log = &adapter->sysctllog;
2281 dvname = device_xname(adapter->dev);
2282
2283 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2284 0, CTLTYPE_NODE, dvname,
2285 SYSCTL_DESCR("ixv information and settings"),
2286 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2287 goto err;
2288
2289 return rnode;
2290 err:
2291 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2292 return NULL;
2293 }
2294
2295 static void
2296 ixv_add_device_sysctls(struct adapter *adapter)
2297 {
2298 struct sysctllog **log;
2299 const struct sysctlnode *rnode, *cnode;
2300 device_t dev;
2301
2302 dev = adapter->dev;
2303 log = &adapter->sysctllog;
2304
2305 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2306 aprint_error_dev(dev, "could not create sysctl root\n");
2307 return;
2308 }
2309
2310 if (sysctl_createv(log, 0, &rnode, &cnode,
2311 CTLFLAG_READWRITE, CTLTYPE_INT,
2312 "debug", SYSCTL_DESCR("Debug Info"),
2313 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2314 aprint_error_dev(dev, "could not create sysctl\n");
2315
2316 if (sysctl_createv(log, 0, &rnode, &cnode,
2317 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2318 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2319 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2320 aprint_error_dev(dev, "could not create sysctl\n");
2321
2322 if (sysctl_createv(log, 0, &rnode, &cnode,
2323 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2324 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2325 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2326 aprint_error_dev(dev, "could not create sysctl\n");
2327 }
2328
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 * Attaches driver-wide event counters, then per-queue sysctl nodes
 * and event counters, then MAC and mailbox counters.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/*
	 * Per-queue nodes and counters.  NOTE: a sysctl_createv failure
	 * breaks out of the loop, leaving later queues without nodes or
	 * event counters.
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* rnode is intentionally reused as parent of the queue node */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue event counters */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/* FreeBSD-style sysctls; only built when LRO is enabled */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2505
2506 /************************************************************************
2507 * ixv_set_sysctl_value
2508 ************************************************************************/
2509 static void
2510 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2511 const char *description, int *limit, int value)
2512 {
2513 device_t dev = adapter->dev;
2514 struct sysctllog **log;
2515 const struct sysctlnode *rnode, *cnode;
2516
2517 log = &adapter->sysctllog;
2518 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2519 aprint_error_dev(dev, "could not create sysctl root\n");
2520 return;
2521 }
2522 if (sysctl_createv(log, 0, &rnode, &cnode,
2523 CTLFLAG_READWRITE, CTLTYPE_INT,
2524 name, SYSCTL_DESCR(description),
2525 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2526 aprint_error_dev(dev, "could not create sysctl\n");
2527 *limit = value;
2528 } /* ixv_set_sysctl_value */
2529
/************************************************************************
 * ixv_print_debug_info
 *
 * Called from ixv_sysctl_debug when the "debug" sysctl is written
 * with 1.  Provides a way to take a look at important statistics
 * maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
#ifdef LRO
	struct lro_ctrl *lro;
#endif /* LRO */

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Dump per-queue IRQ, Rx and Tx counters */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		/* ev_count is cast to match the printf format widths */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail.ev_count);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq.ev_count);
} /* ixv_print_debug_info */
2579
2580 /************************************************************************
2581 * ixv_sysctl_debug
2582 ************************************************************************/
2583 static int
2584 ixv_sysctl_debug(SYSCTLFN_ARGS)
2585 {
2586 struct sysctlnode node;
2587 struct adapter *adapter;
2588 int error, result;
2589
2590 node = *rnode;
2591 node.sysctl_data = &result;
2592 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2593
2594 if (error || newp == NULL)
2595 return error;
2596
2597 if (result == 1) {
2598 adapter = (struct adapter *)node.sysctl_data;
2599 ixv_print_debug_info(adapter);
2600 }
2601
2602 return 0;
2603 } /* ixv_sysctl_debug */
2604
2605 /************************************************************************
2606 * ixv_init_device_features
2607 ************************************************************************/
2608 static void
2609 ixv_init_device_features(struct adapter *adapter)
2610 {
2611 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2612 | IXGBE_FEATURE_VF
2613 | IXGBE_FEATURE_RSS
2614 | IXGBE_FEATURE_LEGACY_TX;
2615
2616 /* A tad short on feature flags for VFs, atm. */
2617 switch (adapter->hw.mac.type) {
2618 case ixgbe_mac_82599_vf:
2619 break;
2620 case ixgbe_mac_X540_vf:
2621 break;
2622 case ixgbe_mac_X550_vf:
2623 case ixgbe_mac_X550EM_x_vf:
2624 case ixgbe_mac_X550EM_a_vf:
2625 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2626 break;
2627 default:
2628 break;
2629 }
2630
2631 /* Enabled by default... */
2632 /* Is a virtual function (VF) */
2633 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2634 adapter->feat_en |= IXGBE_FEATURE_VF;
2635 /* Netmap */
2636 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2637 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2638 /* Receive-Side Scaling (RSS) */
2639 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2640 adapter->feat_en |= IXGBE_FEATURE_RSS;
2641 /* Needs advanced context descriptor regardless of offloads req'd */
2642 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2643 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2644
2645 /* Enabled via sysctl... */
2646 /* Legacy (single queue) transmit */
2647 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2648 ixv_enable_legacy_tx)
2649 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2650 } /* ixv_init_device_features */
2651
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 * Compiled out: NetBSD has no FreeBSD-style device shutdown method;
 * equivalent handling would go through pmf(9).
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	/* Stop the interface under the core lock */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2667
2668 static int
2669 ixv_ifflags_cb(struct ethercom *ec)
2670 {
2671 struct ifnet *ifp = &ec->ec_if;
2672 struct adapter *adapter = ifp->if_softc;
2673 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2674
2675 IXGBE_CORE_LOCK(adapter);
2676
2677 if (change != 0)
2678 adapter->if_flags = ifp->if_flags;
2679
2680 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2681 rc = ENETRESET;
2682
2683 /* Set up VLAN support and filter */
2684 ixv_setup_vlan_support(adapter);
2685
2686 IXGBE_CORE_UNLOCK(adapter);
2687
2688 return rc;
2689 }
2690
2691
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities must be toggled as one unit */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug tracing only, no dispatch */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl do the base work; only act on ENETRESET */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* not running: nothing to re-program */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capabilities/MTU changes require a full re-init */
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2772
2773 /************************************************************************
2774 * ixv_init
2775 ************************************************************************/
2776 static int
2777 ixv_init(struct ifnet *ifp)
2778 {
2779 struct adapter *adapter = ifp->if_softc;
2780
2781 IXGBE_CORE_LOCK(adapter);
2782 ixv_init_locked(adapter);
2783 IXGBE_CORE_UNLOCK(adapter);
2784
2785 return 0;
2786 } /* ixv_init */
2787
/************************************************************************
 * ixv_handle_que
 *
 * Deferred (softint/workqueue) per-queue service routine: drains Rx,
 * reaps Tx, restarts queued transmissions, and either reschedules
 * itself (more work pending) or re-enables the queue interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Rx is processed without the Tx lock */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Restart multiqueue transmit if packets are waiting */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* Still work pending: requeue ourselves and return
			 * WITHOUT re-enabling the interrupt. */
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2835
2836 /************************************************************************
2837 * ixv_handle_que_work
2838 ************************************************************************/
2839 static void
2840 ixv_handle_que_work(struct work *wk, void *context)
2841 {
2842 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2843
2844 /*
2845 * "enqueued flag" is not required here the same as ixg(4).
2846 * See ixgbe_msix_que().
2847 */
2848 ixv_handle_que(que);
2849 }
2850
2851 /************************************************************************
2852 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2853 ************************************************************************/
2854 static int
2855 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2856 {
2857 device_t dev = adapter->dev;
2858 struct ix_queue *que = adapter->queues;
2859 struct tx_ring *txr = adapter->tx_rings;
2860 int error, msix_ctrl, rid, vector = 0;
2861 pci_chipset_tag_t pc;
2862 pcitag_t tag;
2863 char intrbuf[PCI_INTRSTR_LEN];
2864 char wqname[MAXCOMLEN];
2865 char intr_xname[32];
2866 const char *intrstr = NULL;
2867 kcpuset_t *affinity;
2868 int cpu_id = 0;
2869
2870 pc = adapter->osdep.pc;
2871 tag = adapter->osdep.tag;
2872
2873 adapter->osdep.nintrs = adapter->num_queues + 1;
2874 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2875 adapter->osdep.nintrs) != 0) {
2876 aprint_error_dev(dev,
2877 "failed to allocate MSI-X interrupt\n");
2878 return (ENXIO);
2879 }
2880
2881 kcpuset_create(&affinity, false);
2882 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2883 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2884 device_xname(dev), i);
2885 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2886 sizeof(intrbuf));
2887 #ifdef IXGBE_MPSAFE
2888 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2889 true);
2890 #endif
2891 /* Set the handler function */
2892 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2893 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2894 intr_xname);
2895 if (que->res == NULL) {
2896 pci_intr_release(pc, adapter->osdep.intrs,
2897 adapter->osdep.nintrs);
2898 aprint_error_dev(dev,
2899 "Failed to register QUE handler\n");
2900 kcpuset_destroy(affinity);
2901 return (ENXIO);
2902 }
2903 que->msix = vector;
2904 adapter->active_queues |= (u64)(1 << que->msix);
2905
2906 cpu_id = i;
2907 /* Round-robin affinity */
2908 kcpuset_zero(affinity);
2909 kcpuset_set(affinity, cpu_id % ncpu);
2910 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2911 NULL);
2912 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2913 intrstr);
2914 if (error == 0)
2915 aprint_normal(", bound queue %d to cpu %d\n",
2916 i, cpu_id % ncpu);
2917 else
2918 aprint_normal("\n");
2919
2920 #ifndef IXGBE_LEGACY_TX
2921 txr->txr_si
2922 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2923 ixgbe_deferred_mq_start, txr);
2924 #endif
2925 que->que_si
2926 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2927 ixv_handle_que, que);
2928 if (que->que_si == NULL) {
2929 aprint_error_dev(dev,
2930 "could not establish software interrupt\n");
2931 }
2932 }
2933 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
2934 error = workqueue_create(&adapter->txr_wq, wqname,
2935 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2936 IXGBE_WORKQUEUE_FLAGS);
2937 if (error) {
2938 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
2939 }
2940 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
2941
2942 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
2943 error = workqueue_create(&adapter->que_wq, wqname,
2944 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2945 IXGBE_WORKQUEUE_FLAGS);
2946 if (error) {
2947 aprint_error_dev(dev,
2948 "couldn't create workqueue\n");
2949 }
2950
2951 /* and Mailbox */
2952 cpu_id++;
2953 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2954 adapter->vector = vector;
2955 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2956 sizeof(intrbuf));
2957 #ifdef IXGBE_MPSAFE
2958 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2959 true);
2960 #endif
2961 /* Set the mbx handler function */
2962 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2963 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2964 intr_xname);
2965 if (adapter->osdep.ihs[vector] == NULL) {
2966 adapter->res = NULL;
2967 aprint_error_dev(dev, "Failed to register LINK handler\n");
2968 kcpuset_destroy(affinity);
2969 return (ENXIO);
2970 }
2971 /* Round-robin affinity */
2972 kcpuset_zero(affinity);
2973 kcpuset_set(affinity, cpu_id % ncpu);
2974 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2975
2976 aprint_normal_dev(dev,
2977 "for link, interrupting at %s", intrstr);
2978 if (error == 0)
2979 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2980 else
2981 aprint_normal("\n");
2982
2983 /* Tasklets for Mailbox */
2984 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2985 ixv_handle_link, adapter);
2986 /*
2987 * Due to a broken design QEMU will fail to properly
2988 * enable the guest for MSI-X unless the vectors in
2989 * the table are all set up, so we must rewrite the
2990 * ENABLE in the MSI-X control register again at this
2991 * point to cause it to successfully initialize us.
2992 */
2993 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2994 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
2995 rid += PCI_MSIX_CTL;
2996 msix_ctrl = pci_conf_read(pc, tag, rid);
2997 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
2998 pci_conf_write(pc, tag, rid, msix_ctrl);
2999 }
3000
3001 kcpuset_destroy(affinity);
3002 return (0);
3003 } /* ixv_allocate_msix */
3004
3005 /************************************************************************
3006 * ixv_configure_interrupts - Setup MSI-X resources
3007 *
3008 * Note: The VF device MUST use MSI-X, there is no fallback.
3009 ************************************************************************/
3010 static int
3011 ixv_configure_interrupts(struct adapter *adapter)
3012 {
3013 device_t dev = adapter->dev;
3014 int want, queues, msgs;
3015
3016 /* Must have at least 2 MSI-X vectors */
3017 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3018 if (msgs < 2) {
3019 aprint_error_dev(dev, "MSIX config error\n");
3020 return (ENXIO);
3021 }
3022 msgs = MIN(msgs, IXG_MAX_NINTR);
3023
3024 /* Figure out a reasonable auto config value */
3025 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3026
3027 if (ixv_num_queues != 0)
3028 queues = ixv_num_queues;
3029 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3030 queues = IXGBE_VF_MAX_TX_QUEUES;
3031
3032 /*
3033 * Want vectors for the queues,
3034 * plus an additional for mailbox.
3035 */
3036 want = queues + 1;
3037 if (msgs >= want)
3038 msgs = want;
3039 else {
3040 aprint_error_dev(dev,
3041 "MSI-X Configuration Problem, "
3042 "%d vectors but %d queues wanted!\n",
3043 msgs, want);
3044 return -1;
3045 }
3046
3047 adapter->msix_mem = (void *)1; /* XXX */
3048 aprint_normal_dev(dev,
3049 "Using MSI-X interrupts with %d vectors\n", msgs);
3050 adapter->num_queues = queues;
3051
3052 return (0);
3053 } /* ixv_configure_interrupts */
3054
3055
3056 /************************************************************************
3057 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3058 *
3059 * Done outside of interrupt context since the driver might sleep
3060 ************************************************************************/
3061 static void
3062 ixv_handle_link(void *context)
3063 {
3064 struct adapter *adapter = context;
3065
3066 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3067 &adapter->link_up, FALSE);
3068 ixv_update_link_status(adapter);
3069 } /* ixv_handle_link */
3070
3071 /************************************************************************
3072 * ixv_check_link - Used in the local timer to poll for link changes
3073 ************************************************************************/
3074 static void
3075 ixv_check_link(struct adapter *adapter)
3076 {
3077 adapter->hw.mac.get_link_status = TRUE;
3078
3079 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3080 &adapter->link_up, FALSE);
3081 ixv_update_link_status(adapter);
3082 } /* ixv_check_link */
3083