ixv.c revision 1.84 1 /*$NetBSD: ixv.c,v 1.84 2018/03/02 10:21:01 knakahara Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
36
37
38 #ifdef _KERNEL_OPT
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 #include "opt_net_mpsafe.h"
42 #endif
43
44 #include "ixgbe.h"
45 #include "vlan.h"
46
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *   (indexed by the String Index field of ixv_vendor_info_array)
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 * Function prototypes
 *********************************************************************/

/* autoconf(9) entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif

/* ifnet / ethercom callbacks */
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);

/* PCI resource / interrupt setup */
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);

/* Watchdog timer */
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);

static int	ixv_setup_interface(device_t, struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

/* TX/RX hardware setup */
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static void	ixv_check_link(struct adapter *);

/* Interrupt masking, multicast and link state */
static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct ix_queue *, uint32_t);

/* VLAN support */
static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Sysctl/statistics setup */
static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);


/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets (softints) */
static void	ixv_handle_que(void *);
static void	ixv_handle_link(void *);

/* Workqueue handler for deferred work */
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
158
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *   (NetBSD: autoconf(9) attachment)
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus glue, unused on NetBSD */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/* NetBSD has no FreeBSD-style TUNABLE_INT(9); stub it out as a no-op. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/* Callout/softint/workqueue flags depend on whether the net stack is MPSAFE */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
241
242 /************************************************************************
243 * ixv_probe - Device identification routine
244 *
245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID.
247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/
250 static int
251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
252 {
253 #ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux;
255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257 #else
258 return 0;
259 #endif
260 } /* ixv_probe */
261
262 static ixgbe_vendor_info_t *
263 ixv_lookup(const struct pci_attach_args *pa)
264 {
265 ixgbe_vendor_info_t *ent;
266 pcireg_t subid;
267
268 INIT_DEBUGOUT("ixv_lookup: begin");
269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL;
272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) {
282 return ent;
283 }
284 }
285
286 return NULL;
287 }
288
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure, every err_out/err_late path releases whatever was
 *   allocated up to that point; osdep.attached stays false so that
 *   ixv_detach() becomes a no-op.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Stash bus handles; prefer the 64-bit DMA tag when available */
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot be NULL: probe already matched this device */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type: map device ID to VF mac type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/*
	 * Negotiate mailbox API version with the PF.  Failure is not
	 * fatal: we fall back to whatever hw->api_version ends up as.
	 */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/*
	 * If no mac address was assigned by the PF, make a random
	 * locally-administered unicast one (clear the multicast bit,
	 * set the locally-administered bit).
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must
	 * be a multiple of DBA_ALIGN and the count within [MIN,MAX].
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report feature capability vs. what is actually enabled */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	/* Marks attach success; ixv_detach() checks this flag */
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
563
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive (EBUSY) when VLANs are still
 *   configured and the detach is not forced.
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if attach never completed */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down per-queue softints (TX one only if multiqueue TX) */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* And the optional workqueue alternatives */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for the watchdog callout to finish before freeing anything */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);

	/* Detach all event counters, mirroring ixv_add_stats_sysctls() */
	evcnt_detach(&adapter->handleq);
	evcnt_detach(&adapter->req);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue counters; txr was advanced above, so rewind it */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->im_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
705
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the core lock (asserted below).
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the hardware and the watchdog before reprogramming */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: the mailbox vector plus one bit per queue */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
830
831 /*
832 * MSI-X Interrupt Handlers and Tasklets
833 */
834
835 static inline void
836 ixv_enable_queue(struct adapter *adapter, u32 vector)
837 {
838 struct ixgbe_hw *hw = &adapter->hw;
839 struct ix_queue *que = &adapter->queues[vector];
840 u32 queue = 1 << vector;
841 u32 mask;
842
843 mutex_enter(&que->im_mtx);
844 if (que->im_nest > 0 && --que->im_nest > 0)
845 goto out;
846
847 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
848 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
849 out:
850 mutex_exit(&que->im_mtx);
851 } /* ixv_enable_queue */
852
853 static inline void
854 ixv_disable_queue(struct adapter *adapter, u32 vector)
855 {
856 struct ixgbe_hw *hw = &adapter->hw;
857 struct ix_queue *que = &adapter->queues[vector];
858 u64 queue = (u64)(1 << vector);
859 u32 mask;
860
861 mutex_enter(&que->im_mtx);
862 if (que->im_nest++ > 0)
863 goto out;
864
865 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
866 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
867 out:
868 mutex_exit(&que->im_mtx);
869 } /* ixv_disable_queue */
870
871 static inline void
872 ixv_rearm_queues(struct adapter *adapter, u64 queues)
873 {
874 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
875 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
876 } /* ixv_rearm_queues */
877
878
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Masks the queue's interrupt, reaps TX completions, and (when AIM
 *   is enabled) recomputes the interrupt throttle rate from the
 *   average packet size seen since the last interrupt.  RX processing
 *   is deferred to the que softint on NetBSD.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until the deferred work re-enables it */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = max average packet size over TX and RX */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Defer remaining work, or unmask immediately when all done */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
969
970 /************************************************************************
971 * ixv_msix_mbx
972 ************************************************************************/
973 static int
974 ixv_msix_mbx(void *arg)
975 {
976 struct adapter *adapter = arg;
977 struct ixgbe_hw *hw = &adapter->hw;
978
979 ++adapter->link_irq.ev_count;
980 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
981
982 /* Link status change */
983 hw->mac.get_link_status = TRUE;
984 softint_schedule(adapter->link_si);
985
986 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
987
988 return 1;
989 } /* ixv_msix_mbx */
990
991 static void
992 ixv_eitr_write(struct ix_queue *que, uint32_t itr)
993 {
994 struct adapter *adapter = que->adapter;
995
996 /*
997 * Newer devices than 82598 have VF function, so this function is
998 * simple.
999 */
1000 itr |= IXGBE_EITR_CNT_WDIS;
1001
1002 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), itr);
1003 }
1004
1005
1006 /************************************************************************
1007 * ixv_media_status - Media Ioctl callback
1008 *
1009 * Called whenever the user queries the status of
1010 * the interface using ifconfig.
1011 ************************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it. */
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and return early (unlocking first). */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * Map the negotiated speed to an ifmedia subtype; the VF link is
	 * always full duplex.  An unrecognized speed leaves only
	 * IFM_ETHER | IFM_ACTIVE set (no default case).
	 */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}

	/* Derive if_baudrate from the media word just reported. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixv_media_status */
1059
1060 /************************************************************************
1061 * ixv_media_change - Media Ioctl callback
1062 *
1063 * Called when the user changes speed/duplex using
1064 * media/mediopt option with ifconfig.
1065 ************************************************************************/
1066 static int
1067 ixv_media_change(struct ifnet *ifp)
1068 {
1069 struct adapter *adapter = ifp->if_softc;
1070 struct ifmedia *ifm = &adapter->media;
1071
1072 INIT_DEBUGOUT("ixv_media_change: begin");
1073
1074 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1075 return (EINVAL);
1076
1077 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1078 case IFM_AUTO:
1079 break;
1080 default:
1081 device_printf(adapter->dev, "Only auto media type\n");
1082 return (EINVAL);
1083 }
1084
1085 return (0);
1086 } /* ixv_media_change */
1087
1088
1089 /************************************************************************
1090 * ixv_negotiate_api
1091 *
1092 * Negotiate the Mailbox API with the PF;
1093 * start with the most featured API first.
1094 ************************************************************************/
1095 static int
1096 ixv_negotiate_api(struct adapter *adapter)
1097 {
1098 struct ixgbe_hw *hw = &adapter->hw;
1099 int mbx_api[] = { ixgbe_mbox_api_11,
1100 ixgbe_mbox_api_10,
1101 ixgbe_mbox_api_unknown };
1102 int i = 0;
1103
1104 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1105 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1106 return (0);
1107 i++;
1108 }
1109
1110 return (EINVAL);
1111 } /* ixv_negotiate_api */
1112
1113
1114 /************************************************************************
1115 * ixv_set_multi - Multicast Update
1116 *
1117 * Called whenever multicast address list is updated.
1118 ************************************************************************/
static void
ixv_set_multi(struct adapter *adapter)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	int mcnt = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_multi: begin");

	/*
	 * Flatten the ethercom multicast list into mta[], capped at
	 * MAX_NUM_MULTICAST_ADDRESSES entries so the array cannot
	 * overflow.  NOTE(review): only enm_addrlo is copied; address
	 * ranges (addrlo != addrhi) are not expanded — confirm callers
	 * never register ranges here.
	 */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		/* XXX This might be required --msaitoh */
		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
			break;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	update_ptr = mta;

	/* Hand the flat list to shared code; ixv_mc_array_itr walks mta. */
	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, TRUE);

	return;
} /* ixv_set_multi */
1153
1154 /************************************************************************
1155 * ixv_mc_array_itr
1156 *
1157 * An iterator function needed by the multicast shared code.
1158 * It feeds the shared code routine the addresses in the
1159 * array of ixv_set_multi() one by one.
1160 ************************************************************************/
1161 static u8 *
1162 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1163 {
1164 u8 *addr = *update_ptr;
1165 u8 *newptr;
1166 *vmdq = 0;
1167
1168 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1169 *update_ptr = newptr;
1170
1171 return addr;
1172 } /* ixv_mc_array_itr */
1173
1174 /************************************************************************
1175 * ixv_local_timer - Timer routine
1176 *
1177 * Checks for link status, updates statistics,
1178 * and runs the watchdog check.
1179 ************************************************************************/
/*
 * Callout entry point for the periodic timer: take the core lock and
 * run the locked timer body.
 */
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1189
/*
 * Locked body of the periodic timer: refreshes link state and stats,
 * detects hung TX queues, and either re-arms the callout or performs
 * a full watchdog reset.  Caller must hold the core mutex.
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;		/* bitmask of queues with pending work */
	int hung = 0;		/* number of queues declared hung */

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		 * NOTE(review): the hung test above reads que->busy but
		 * the mark below sets que->txr->busy — matches upstream,
		 * but verify the two counters are intentionally distinct.
		 */
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves to run again in one second. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log, mark interface down, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1256
1257 /************************************************************************
1258 * ixv_update_link_status - Update OS on link state
1259 *
1260 * Note: Only updates the OS on the cached link state.
1261 * The real check of the hardware only happens with
1262 * a link interrupt.
1263 ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	/*
	 * Compare the cached hardware state (link_up) against what we
	 * last told the stack (link_active) and only notify on change.
	 */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose) {
				const char *bpsmsg;

				/* Human-readable speed for the boot log. */
				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
} /* ixv_update_link_status */
1315
1316
1317 /************************************************************************
1318 * ixv_stop - Stop the hardware
1319 *
1320 * Disables all traffic on the adapter by issuing a
1321 * global reset on the MAC and deallocates TX/RX buffers.
1322 ************************************************************************/
1323 static void
1324 ixv_ifstop(struct ifnet *ifp, int disable)
1325 {
1326 struct adapter *adapter = ifp->if_softc;
1327
1328 IXGBE_CORE_LOCK(adapter);
1329 ixv_stop(adapter);
1330 IXGBE_CORE_UNLOCK(adapter);
1331 }
1332
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask all interrupts before quiescing the hardware. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared here so the
	 * stop_adapter() call below actually runs (shared code appears
	 * to short-circuit when the flag is already set) — confirm
	 * against the shared-code implementation.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the periodic watchdog/statistics timer. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1360
1361
1362 /************************************************************************
1363 * ixv_allocate_pci_resources
1364 ************************************************************************/
/*
 * Map BAR0 (the register window) and pick up tunables.
 * Returns 0 on success, ENXIO if the BAR cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		/* goto below jumps into the shared error path. */
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing mapped" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1405
1406 /************************************************************************
1407 * ixv_free_pci_resources
1408 ************************************************************************/
/*
 * Release interrupt handlers, the MSI-X vectors, and the BAR0 mapping
 * established by ixv_allocate_pci_resources()/interrupt setup.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/*
		 * NOTE(review): que->res is the "established" marker but
		 * the handle passed is osdep.ihs[i] — confirm they stay
		 * in sync at establish time.
		 */
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Give back the MSI-X vectors themselves. */
	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size == 0 means BAR0 was never mapped (see allocate). */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
1445
1446 /************************************************************************
1447 * ixv_setup_interface
1448 *
1449 * Setup networking device structure and register an interface.
1450 ************************************************************************/
/*
 * Fill in the ifnet/ethercom structures, register the interface with
 * the network stack, and declare the driver's capabilities and media.
 * Returns 0 on success or the error from if_initialize().
 */
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	/* Stack may call us without KERNEL_LOCK when MPSAFE is on. */
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit unless legacy TX was requested. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	/* Per-CPU input queue for the RX path. */
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	/* Intercept IFF flag changes (promisc etc.) via callback. */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6;
	/* Checksum/TSO offload is advertised but off by default. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;
#if 0
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */
1537
1538
1539 /************************************************************************
1540 * ixv_initialize_transmit_units - Enable transmit unit.
1541 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	/* Program each TX queue: thresholds, base/length, then enable. */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1583
1584
1585 /************************************************************************
1586 * ixv_initialize_rss_mapping
1587 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, round-robin over
	 * the configured queues (j wraps at num_queues).
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Flush one VFRETA register per four accumulated entries. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash-type flags into VFMRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1681
1682
1683 /************************************************************************
1684 * ixv_initialize_receive_units - Setup receive registers and features.
1685 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size (in SRRCTL units) based on the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Packet-split replication types considered by the hardware. */
	psrtype = IXGBE_PSRTYPE_TCPHDR
		| IXGBE_PSRTYPE_UDPHDR
		| IXGBE_PSRTYPE_IPV4HDR
		| IXGBE_PSRTYPE_IPV6HDR
		| IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixv_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* Without PCSD, fall back to IP payload checksum. */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixv_initialize_receive_units */
1814
1815 /************************************************************************
1816 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1817 *
1818 * Retrieves the TDH value from the hardware
1819 ************************************************************************/
1820 static int
1821 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1822 {
1823 struct sysctlnode node = *rnode;
1824 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1825 uint32_t val;
1826
1827 if (!txr)
1828 return (0);
1829
1830 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1831 node.sysctl_data = &val;
1832 return sysctl_lookup(SYSCTLFN_CALL(&node));
1833 } /* ixv_sysctl_tdh_handler */
1834
1835 /************************************************************************
1836 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1837 *
1838 * Retrieves the TDT value from the hardware
1839 ************************************************************************/
1840 static int
1841 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1842 {
1843 struct sysctlnode node = *rnode;
1844 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1845 uint32_t val;
1846
1847 if (!txr)
1848 return (0);
1849
1850 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1851 node.sysctl_data = &val;
1852 return sysctl_lookup(SYSCTLFN_CALL(&node));
1853 } /* ixv_sysctl_tdt_handler */
1854
1855 /************************************************************************
1856 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1857 *
1858 * Retrieves the RDH value from the hardware
1859 ************************************************************************/
1860 static int
1861 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1862 {
1863 struct sysctlnode node = *rnode;
1864 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1865 uint32_t val;
1866
1867 if (!rxr)
1868 return (0);
1869
1870 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1871 node.sysctl_data = &val;
1872 return sysctl_lookup(SYSCTLFN_CALL(&node));
1873 } /* ixv_sysctl_rdh_handler */
1874
1875 /************************************************************************
1876 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1877 *
1878 * Retrieves the RDT value from the hardware
1879 ************************************************************************/
1880 static int
1881 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1882 {
1883 struct sysctlnode node = *rnode;
1884 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1885 uint32_t val;
1886
1887 if (!rxr)
1888 return (0);
1889
1890 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1891 node.sysctl_data = &val;
1892 return sysctl_lookup(SYSCTLFN_CALL(&node));
1893 } /* ixv_sysctl_rdt_handler */
1894
1895 /************************************************************************
1896 * ixv_setup_vlan_support
1897 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl, vid, vfta, retry;

	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (!VLAN_ATTACHED(ec))
		return;

	/* Enable VLAN tag stripping on every RX queue. */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = TRUE;
	}

#if 1
	/* XXX dirty hack. Enable all VIDs */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
#endif
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; the PF may
			 * be busy, so retry up to 5 times before giving up
			 * on this VID.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1959
1960 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
1961 /************************************************************************
1962 * ixv_register_vlan
1963 *
1964 * Run via a vlan config EVENT, it enables us to use the
1965 * HW Filter table since we can get the vlan id. This just
1966 * creates the entry in the soft version of the VFTA, init
1967 * will repopulate the real table.
1968 ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* shadow_vfta is an array of 32-bit words; map vid -> word/bit. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
1989
1990 /************************************************************************
1991 * ixv_unregister_vlan
1992 *
1993 * Run via a vlan unconfig EVENT, remove our entry
1994 * in the soft vfta.
1995 ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	/* Not our event */
	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the matching bit in the soft VFTA shadow. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
2016 #endif
2017
2018 /************************************************************************
2019 * ixv_enable_intr
2020 ************************************************************************/
2021 static void
2022 ixv_enable_intr(struct adapter *adapter)
2023 {
2024 struct ixgbe_hw *hw = &adapter->hw;
2025 struct ix_queue *que = adapter->queues;
2026 u32 mask;
2027 int i;
2028
2029 /* For VTEIAC */
2030 mask = (1 << adapter->vector);
2031 for (i = 0; i < adapter->num_queues; i++, que++)
2032 mask |= (1 << que->msix);
2033 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2034
2035 /* For VTEIMS */
2036 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2037 que = adapter->queues;
2038 for (i = 0; i < adapter->num_queues; i++, que++)
2039 ixv_enable_queue(adapter, que->msix);
2040
2041 IXGBE_WRITE_FLUSH(hw);
2042
2043 return;
2044 } /* ixv_enable_intr */
2045
2046 /************************************************************************
2047 * ixv_disable_intr
2048 ************************************************************************/
2049 static void
2050 ixv_disable_intr(struct adapter *adapter)
2051 {
2052 struct ix_queue *que = adapter->queues;
2053
2054 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2055
2056 /* disable interrupts other than queues */
2057 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2058
2059 for (int i = 0; i < adapter->num_queues; i++, que++)
2060 ixv_disable_queue(adapter, que->msix);
2061
2062 IXGBE_WRITE_FLUSH(&adapter->hw);
2063
2064 return;
2065 } /* ixv_disable_intr */
2066
2067 /************************************************************************
2068 * ixv_set_ivar
2069 *
2070 * Setup the correct IVAR register for a particular MSI-X interrupt
2071 * - entry is the register array entry
2072 * - vector is the MSI-X vector for this queue
2073 * - type is RX/TX/MISC
2074 ************************************************************************/
2075 static void
2076 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2077 {
2078 struct ixgbe_hw *hw = &adapter->hw;
2079 u32 ivar, index;
2080
2081 vector |= IXGBE_IVAR_ALLOC_VAL;
2082
2083 if (type == -1) { /* MISC IVAR */
2084 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2085 ivar &= ~0xFF;
2086 ivar |= vector;
2087 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2088 } else { /* RX/TX IVARS */
2089 index = (16 * (entry & 1)) + (8 * type);
2090 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2091 ivar &= ~(0xFF << index);
2092 ivar |= (vector << index);
2093 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2094 }
2095 } /* ixv_set_ivar */
2096
2097 /************************************************************************
2098 * ixv_configure_ivars
2099 ************************************************************************/
2100 static void
2101 ixv_configure_ivars(struct adapter *adapter)
2102 {
2103 struct ix_queue *que = adapter->queues;
2104
2105 /* XXX We should sync EITR value calculation with ixgbe.c? */
2106
2107 for (int i = 0; i < adapter->num_queues; i++, que++) {
2108 /* First the RX queue entry */
2109 ixv_set_ivar(adapter, i, que->msix, 0);
2110 /* ... and the TX */
2111 ixv_set_ivar(adapter, i, que->msix, 1);
2112 /* Set an initial value in EITR */
2113 ixv_eitr_write(que, IXGBE_EITR_DEFAULT);
2114 }
2115
2116 /* For the mailbox interrupt */
2117 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2118 } /* ixv_configure_ivars */
2119
2120
2121 /************************************************************************
2122 * ixv_save_stats
2123 *
2124 * The VF stats registers never have a truly virgin
2125 * starting point, so this routine tries to make an
2126 * artificial one, marking ground zero on attach as
2127 * it were.
2128 ************************************************************************/
2129 static void
2130 ixv_save_stats(struct adapter *adapter)
2131 {
2132 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2133
2134 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2135 stats->saved_reset_vfgprc +=
2136 stats->vfgprc.ev_count - stats->base_vfgprc;
2137 stats->saved_reset_vfgptc +=
2138 stats->vfgptc.ev_count - stats->base_vfgptc;
2139 stats->saved_reset_vfgorc +=
2140 stats->vfgorc.ev_count - stats->base_vfgorc;
2141 stats->saved_reset_vfgotc +=
2142 stats->vfgotc.ev_count - stats->base_vfgotc;
2143 stats->saved_reset_vfmprc +=
2144 stats->vfmprc.ev_count - stats->base_vfmprc;
2145 }
2146 } /* ixv_save_stats */
2147
2148 /************************************************************************
2149 * ixv_init_stats
2150 ************************************************************************/
2151 static void
2152 ixv_init_stats(struct adapter *adapter)
2153 {
2154 struct ixgbe_hw *hw = &adapter->hw;
2155
2156 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2157 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2158 adapter->stats.vf.last_vfgorc |=
2159 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2160
2161 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2162 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2163 adapter->stats.vf.last_vfgotc |=
2164 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2165
2166 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2167
2168 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2169 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2170 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2171 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2172 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2173 } /* ixv_init_stats */
2174
/*
 * UPDATE_STAT_32: fold a 32-bit hardware counter into a 64-bit event
 * counter, detecting wrap (current < last => the HW counter rolled
 * over, so carry 2^32 into the accumulated count).  Expects 'hw' in
 * scope.  Wrapped in do/while(0) so it behaves as a single statement
 * (safe in unbraced if/else); arguments are parenthesized against
 * expansion surprises.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2184
/*
 * UPDATE_STAT_36: same as UPDATE_STAT_32 but for the 36-bit octet
 * counters that span an LSB/MSB register pair; the wrap carry is
 * therefore 2^36.  Expects 'hw' in scope.  do/while(0) wrapping and
 * argument parenthesization as above.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (/*CONSTCOND*/0)
2196
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 *
 * Reads each VF hardware counter and folds it into the corresponding
 * event counter, handling 32/36-bit wrap.  'hw' is referenced by the
 * UPDATE_STAT_* macros.
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters (LSB/MSB register pairs) */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2221
2222 /************************************************************************
2223 * ixv_sysctl_interrupt_rate_handler
2224 ************************************************************************/
2225 static int
2226 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2227 {
2228 struct sysctlnode node = *rnode;
2229 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2230 struct adapter *adapter = que->adapter;
2231 uint32_t reg, usec, rate;
2232 int error;
2233
2234 if (que == NULL)
2235 return 0;
2236 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2237 usec = ((reg & 0x0FF8) >> 3);
2238 if (usec > 0)
2239 rate = 500000 / usec;
2240 else
2241 rate = 0;
2242 node.sysctl_data = &rate;
2243 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2244 if (error || newp == NULL)
2245 return error;
2246 reg &= ~0xfff; /* default, no limitation */
2247 if (rate > 0 && rate < 500000) {
2248 if (rate < 1000)
2249 rate = 1000;
2250 reg |= ((4000000/rate) & 0xff8);
2251 /*
2252 * When RSC is used, ITR interval must be larger than
2253 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2254 * The minimum value is always greater than 2us on 100M
2255 * (and 10M?(not documented)), but it's not on 1G and higher.
2256 */
2257 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2258 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2259 if ((adapter->num_queues > 1)
2260 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2261 return EINVAL;
2262 }
2263 ixv_max_interrupt_rate = rate;
2264 } else
2265 ixv_max_interrupt_rate = 0;
2266 ixv_eitr_write(que, reg);
2267
2268 return (0);
2269 } /* ixv_sysctl_interrupt_rate_handler */
2270
2271 const struct sysctlnode *
2272 ixv_sysctl_instance(struct adapter *adapter)
2273 {
2274 const char *dvname;
2275 struct sysctllog **log;
2276 int rc;
2277 const struct sysctlnode *rnode;
2278
2279 log = &adapter->sysctllog;
2280 dvname = device_xname(adapter->dev);
2281
2282 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2283 0, CTLTYPE_NODE, dvname,
2284 SYSCTL_DESCR("ixv information and settings"),
2285 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2286 goto err;
2287
2288 return rnode;
2289 err:
2290 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2291 return NULL;
2292 }
2293
/*
 * ixv_add_device_sysctls: register the per-device tunable sysctls
 * (debug trigger, adaptive interrupt moderation, workqueue vs softint
 * Tx/Rx processing) under the device's sysctl root node.
 */
static void
ixv_add_device_sysctls(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Writing 1 here dumps debug info via ixv_sysctl_debug() */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
2327
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 * Attaches driver-level event counters, per-queue event counters and
 * sysctls (interrupt rate, descriptor head/tail), MAC checksum-offload
 * counters, hardware packet counters, and mailbox statistics.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
	    NULL, xname, "Handled queue in softint");
	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
	    NULL, xname, "Requeued in softint");
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue statistics, sysctls, and event counters */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* hw.<dev>.q<N> node; rnode is re-pointed at the child */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

#if 0
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_QUAD,
		    "irqs", SYSCTL_DESCR("irqs on this queue"),
		    NULL, 0, &(adapter->queues[i].irqs),
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;
#endif

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue Tx event counters */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue Rx event counters */
		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2512
2513 /************************************************************************
2514 * ixv_set_sysctl_value
2515 ************************************************************************/
2516 static void
2517 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2518 const char *description, int *limit, int value)
2519 {
2520 device_t dev = adapter->dev;
2521 struct sysctllog **log;
2522 const struct sysctlnode *rnode, *cnode;
2523
2524 log = &adapter->sysctllog;
2525 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2526 aprint_error_dev(dev, "could not create sysctl root\n");
2527 return;
2528 }
2529 if (sysctl_createv(log, 0, &rnode, &cnode,
2530 CTLFLAG_READWRITE, CTLTYPE_INT,
2531 name, SYSCTL_DESCR(description),
2532 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2533 aprint_error_dev(dev, "could not create sysctl\n");
2534 *limit = value;
2535 } /* ixv_set_sysctl_value */
2536
/************************************************************************
 * ixv_print_debug_info
 *
 * Called from the 'debug' sysctl handler (ixv_sysctl_debug).
 * Provides a way to take a look at important statistics
 * maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
#ifdef LRO
	struct lro_ctrl *lro;
#endif /* LRO */

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Per-queue interrupt, Rx, and Tx counters */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail.ev_count);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq.ev_count);
} /* ixv_print_debug_info */
2586
2587 /************************************************************************
2588 * ixv_sysctl_debug
2589 ************************************************************************/
2590 static int
2591 ixv_sysctl_debug(SYSCTLFN_ARGS)
2592 {
2593 struct sysctlnode node;
2594 struct adapter *adapter;
2595 int error, result;
2596
2597 node = *rnode;
2598 node.sysctl_data = &result;
2599 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2600
2601 if (error || newp == NULL)
2602 return error;
2603
2604 if (result == 1) {
2605 adapter = (struct adapter *)node.sysctl_data;
2606 ixv_print_debug_info(adapter);
2607 }
2608
2609 return 0;
2610 } /* ixv_sysctl_debug */
2611
2612 /************************************************************************
2613 * ixv_init_device_features
2614 ************************************************************************/
2615 static void
2616 ixv_init_device_features(struct adapter *adapter)
2617 {
2618 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2619 | IXGBE_FEATURE_VF
2620 | IXGBE_FEATURE_RSS
2621 | IXGBE_FEATURE_LEGACY_TX;
2622
2623 /* A tad short on feature flags for VFs, atm. */
2624 switch (adapter->hw.mac.type) {
2625 case ixgbe_mac_82599_vf:
2626 break;
2627 case ixgbe_mac_X540_vf:
2628 break;
2629 case ixgbe_mac_X550_vf:
2630 case ixgbe_mac_X550EM_x_vf:
2631 case ixgbe_mac_X550EM_a_vf:
2632 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2633 break;
2634 default:
2635 break;
2636 }
2637
2638 /* Enabled by default... */
2639 /* Is a virtual function (VF) */
2640 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2641 adapter->feat_en |= IXGBE_FEATURE_VF;
2642 /* Netmap */
2643 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2644 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2645 /* Receive-Side Scaling (RSS) */
2646 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2647 adapter->feat_en |= IXGBE_FEATURE_RSS;
2648 /* Needs advanced context descriptor regardless of offloads req'd */
2649 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2650 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2651
2652 /* Enabled via sysctl... */
2653 /* Legacy (single queue) transmit */
2654 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2655 ixv_enable_legacy_tx)
2656 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2657 } /* ixv_init_device_features */
2658
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 * Stops the adapter under the core lock.  Currently compiled out;
 * on NetBSD this would be registered through pmf(9).
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2674
2675 static int
2676 ixv_ifflags_cb(struct ethercom *ec)
2677 {
2678 struct ifnet *ifp = &ec->ec_if;
2679 struct adapter *adapter = ifp->if_softc;
2680 int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
2681
2682 IXGBE_CORE_LOCK(adapter);
2683
2684 if (change != 0)
2685 adapter->if_flags = ifp->if_flags;
2686
2687 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2688 rc = ENETRESET;
2689
2690 /* Set up VLAN support and filter */
2691 ixv_setup_vlan_support(adapter);
2692
2693 IXGBE_CORE_UNLOCK(adapter);
2694
2695 return rc;
2696 }
2697
2698
/************************************************************************
 * ixv_ioctl - Ioctl entry point
 *
 * Called when the user wants to configure the interface.
 *
 * return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* L4 Rx checksum capabilities that must be toggled together */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch: debug trace only, no dispatch */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual handling */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let ether_ioctl do the common work first */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: apply what the command changed, if running */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			; /* not running: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2779
/************************************************************************
 * ixv_init - if_init entry point
 *
 * Takes the core lock and performs the locked (re)initialization.
 * Always returns 0.
 ************************************************************************/
static int
ixv_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return 0;
} /* ixv_init */
2794
/************************************************************************
 * ixv_handle_que
 *
 * Deferred (softint or workqueue) per-queue service routine: drains
 * Rx and Tx completions, restarts transmission, and either requeues
 * itself if more work remains or re-enables the queue interrupt.
 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	adapter->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Rx cleanup runs unlocked; Tx work under the Tx lock */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* More work pending: reschedule instead of
			 * re-enabling the interrupt */
			adapter->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2842
/************************************************************************
 * ixv_handle_que_work
 *
 * Workqueue adapter for ixv_handle_que: recovers the queue from the
 * embedded work cookie and runs the common service routine.
 ************************************************************************/
static void
ixv_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here the same as ixg(4).
	 * See ixgbe_msix_que().
	 */
	ixv_handle_que(que);
}
2857
2858 /************************************************************************
2859 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2860 ************************************************************************/
2861 static int
2862 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2863 {
2864 device_t dev = adapter->dev;
2865 struct ix_queue *que = adapter->queues;
2866 struct tx_ring *txr = adapter->tx_rings;
2867 int error, msix_ctrl, rid, vector = 0;
2868 pci_chipset_tag_t pc;
2869 pcitag_t tag;
2870 char intrbuf[PCI_INTRSTR_LEN];
2871 char wqname[MAXCOMLEN];
2872 char intr_xname[32];
2873 const char *intrstr = NULL;
2874 kcpuset_t *affinity;
2875 int cpu_id = 0;
2876
2877 pc = adapter->osdep.pc;
2878 tag = adapter->osdep.tag;
2879
2880 adapter->osdep.nintrs = adapter->num_queues + 1;
2881 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2882 adapter->osdep.nintrs) != 0) {
2883 aprint_error_dev(dev,
2884 "failed to allocate MSI-X interrupt\n");
2885 return (ENXIO);
2886 }
2887
2888 kcpuset_create(&affinity, false);
2889 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2890 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2891 device_xname(dev), i);
2892 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2893 sizeof(intrbuf));
2894 #ifdef IXGBE_MPSAFE
2895 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2896 true);
2897 #endif
2898 /* Set the handler function */
2899 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2900 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2901 intr_xname);
2902 if (que->res == NULL) {
2903 pci_intr_release(pc, adapter->osdep.intrs,
2904 adapter->osdep.nintrs);
2905 aprint_error_dev(dev,
2906 "Failed to register QUE handler\n");
2907 kcpuset_destroy(affinity);
2908 return (ENXIO);
2909 }
2910 que->msix = vector;
2911 adapter->active_queues |= (u64)(1 << que->msix);
2912
2913 cpu_id = i;
2914 /* Round-robin affinity */
2915 kcpuset_zero(affinity);
2916 kcpuset_set(affinity, cpu_id % ncpu);
2917 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2918 NULL);
2919 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2920 intrstr);
2921 if (error == 0)
2922 aprint_normal(", bound queue %d to cpu %d\n",
2923 i, cpu_id % ncpu);
2924 else
2925 aprint_normal("\n");
2926
2927 #ifndef IXGBE_LEGACY_TX
2928 txr->txr_si
2929 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2930 ixgbe_deferred_mq_start, txr);
2931 #endif
2932 que->que_si
2933 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2934 ixv_handle_que, que);
2935 if (que->que_si == NULL) {
2936 aprint_error_dev(dev,
2937 "could not establish software interrupt\n");
2938 }
2939 }
2940 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
2941 error = workqueue_create(&adapter->txr_wq, wqname,
2942 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2943 IXGBE_WORKQUEUE_FLAGS);
2944 if (error) {
2945 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
2946 }
2947 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
2948
2949 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
2950 error = workqueue_create(&adapter->que_wq, wqname,
2951 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2952 IXGBE_WORKQUEUE_FLAGS);
2953 if (error) {
2954 aprint_error_dev(dev,
2955 "couldn't create workqueue\n");
2956 }
2957
2958 /* and Mailbox */
2959 cpu_id++;
2960 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
2961 adapter->vector = vector;
2962 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
2963 sizeof(intrbuf));
2964 #ifdef IXGBE_MPSAFE
2965 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
2966 true);
2967 #endif
2968 /* Set the mbx handler function */
2969 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
2970 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
2971 intr_xname);
2972 if (adapter->osdep.ihs[vector] == NULL) {
2973 adapter->res = NULL;
2974 aprint_error_dev(dev, "Failed to register LINK handler\n");
2975 kcpuset_destroy(affinity);
2976 return (ENXIO);
2977 }
2978 /* Round-robin affinity */
2979 kcpuset_zero(affinity);
2980 kcpuset_set(affinity, cpu_id % ncpu);
2981 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
2982
2983 aprint_normal_dev(dev,
2984 "for link, interrupting at %s", intrstr);
2985 if (error == 0)
2986 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
2987 else
2988 aprint_normal("\n");
2989
2990 /* Tasklets for Mailbox */
2991 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
2992 ixv_handle_link, adapter);
2993 /*
2994 * Due to a broken design QEMU will fail to properly
2995 * enable the guest for MSI-X unless the vectors in
2996 * the table are all set up, so we must rewrite the
2997 * ENABLE in the MSI-X control register again at this
2998 * point to cause it to successfully initialize us.
2999 */
3000 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3001 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3002 rid += PCI_MSIX_CTL;
3003 msix_ctrl = pci_conf_read(pc, tag, rid);
3004 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3005 pci_conf_write(pc, tag, rid, msix_ctrl);
3006 }
3007
3008 kcpuset_destroy(affinity);
3009 return (0);
3010 } /* ixv_allocate_msix */
3011
3012 /************************************************************************
3013 * ixv_configure_interrupts - Setup MSI-X resources
3014 *
3015 * Note: The VF device MUST use MSI-X, there is no fallback.
3016 ************************************************************************/
3017 static int
3018 ixv_configure_interrupts(struct adapter *adapter)
3019 {
3020 device_t dev = adapter->dev;
3021 int want, queues, msgs;
3022
3023 /* Must have at least 2 MSI-X vectors */
3024 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3025 if (msgs < 2) {
3026 aprint_error_dev(dev, "MSIX config error\n");
3027 return (ENXIO);
3028 }
3029 msgs = MIN(msgs, IXG_MAX_NINTR);
3030
3031 /* Figure out a reasonable auto config value */
3032 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3033
3034 if (ixv_num_queues != 0)
3035 queues = ixv_num_queues;
3036 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3037 queues = IXGBE_VF_MAX_TX_QUEUES;
3038
3039 /*
3040 * Want vectors for the queues,
3041 * plus an additional for mailbox.
3042 */
3043 want = queues + 1;
3044 if (msgs >= want)
3045 msgs = want;
3046 else {
3047 aprint_error_dev(dev,
3048 "MSI-X Configuration Problem, "
3049 "%d vectors but %d queues wanted!\n",
3050 msgs, want);
3051 return -1;
3052 }
3053
3054 adapter->msix_mem = (void *)1; /* XXX */
3055 aprint_normal_dev(dev,
3056 "Using MSI-X interrupts with %d vectors\n", msgs);
3057 adapter->num_queues = queues;
3058
3059 return (0);
3060 } /* ixv_configure_interrupts */
3061
3062
3063 /************************************************************************
3064 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3065 *
3066 * Done outside of interrupt context since the driver might sleep
3067 ************************************************************************/
3068 static void
3069 ixv_handle_link(void *context)
3070 {
3071 struct adapter *adapter = context;
3072
3073 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3074 &adapter->link_up, FALSE);
3075 ixv_update_link_status(adapter);
3076 } /* ixv_handle_link */
3077
3078 /************************************************************************
3079 * ixv_check_link - Used in the local timer to poll for link changes
3080 ************************************************************************/
3081 static void
3082 ixv_check_link(struct adapter *adapter)
3083 {
3084 adapter->hw.mac.get_link_status = TRUE;
3085
3086 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3087 &adapter->link_up, FALSE);
3088 ixv_update_link_status(adapter);
3089 } /* ixv_check_link */
3090