ixv.c revision 1.103 1 /*$NetBSD: ixv.c,v 1.103 2018/06/03 10:24:24 maxv Exp $*/
2
3 /******************************************************************************
4
5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
13
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
17
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ******************************************************************************/
35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36
37 #ifdef _KERNEL_OPT
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_net_mpsafe.h"
41 #endif
42
43 #include "ixgbe.h"
44 #include "vlan.h"
45
46 /************************************************************************
47 * Driver version
48 ************************************************************************/
49 static const char ixv_driver_version[] = "2.0.1-k";
50
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* All supported parts are Intel VF (SR-IOV virtual function) IDs;
	 * subvendor/subdevice of 0 acts as a wildcard in ixv_lookup(). */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *   Indexed by the last field of ixv_vendor_info_array entries.
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
77
/*********************************************************************
 * Function prototypes
 *********************************************************************/

/* autoconf(9) attachment entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif

/* ifnet(9) interface entry points */
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);

/* PCI resource / interrupt setup and teardown */
static int	ixv_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);
static int	ixv_setup_interface(device_t, struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

/* Hardware initialization helpers */
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static void	ixv_check_link(struct adapter *);

/* Interrupt masking and link maintenance */
static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN support */
static void	ixv_setup_vlan_support(struct adapter *);
#if 0
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
#endif

/* Statistics bookkeeping */
static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);


/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *);
static void	ixv_handle_link(void *);

/* Workqueue handler for deferred work */
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *   (NetBSD uses autoconf: probe/attach/detach wired via CFATTACH.)
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * FreeBSD-compat stub: on NetBSD these tunables are not wired to the
 * loader environment, so TUNABLE_INT expands to nothing.  The defaults
 * above/below are the effective values.
 */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Whether packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

#ifdef NET_MPSAFE
#define	IXGBE_MPSAFE		1
#define	IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
/* NOTE: "SOFTINFT" is a historical misspelling kept for compatibility */
#define	IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define	IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define	IXGBE_CALLOUT_FLAGS	0
#define	IXGBE_SOFTINFT_FLAGS	0
#define	IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define	IXGBE_WORKQUEUE_PRI	PRI_SOFTNET

#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
241
242 /************************************************************************
243 * ixv_probe - Device identification routine
244 *
245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID.
247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/
250 static int
251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
252 {
253 #ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux;
255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257 #else
258 return 0;
259 #endif
260 } /* ixv_probe */
261
262 static const ixgbe_vendor_info_t *
263 ixv_lookup(const struct pci_attach_args *pa)
264 {
265 const ixgbe_vendor_info_t *ent;
266 pcireg_t subid;
267
268 INIT_DEBUGOUT("ixv_lookup: begin");
269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL;
272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) {
282 return ent;
283 }
284 }
285
286 return NULL;
287 }
288
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure all acquired resources are released via the
 *   err_late/err_out labels and the device is left unattached
 *   (osdep.attached stays false, which ixv_detach checks).
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;
	pcireg_t id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform offers one */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* Cannot be NULL: ixv_probe already matched this device */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version; attach proceeds even on failure
	 * (the PF may only speak an older API). */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		addr[0] &= 0xFE;	/* clear multicast bit */
		addr[0] |= 0x02;	/* set locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN,MAX]. */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the capability/enabled feature bitmaps */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; free them before the common cleanup */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
563
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if attach bailed out early */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse a graceful detach while VLANs are configured */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the per-queue deferred-work softints */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for any in-flight watchdog callout to finish */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach every event counter registered in ixv_add_stats_sysctls */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	txr = adapter->tx_rings;	/* rewind: txr was advanced above */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
705
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the core lock.  On internal setup failure the
 *   adapter is stopped and the function returns without marking the
 *   interface running.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Re-init lets interrupts run again: reset the disable nesting */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: mailbox vector plus one bit per queue vector */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
833
834 /************************************************************************
835 * ixv_enable_queue
836 ************************************************************************/
837 static inline void
838 ixv_enable_queue(struct adapter *adapter, u32 vector)
839 {
840 struct ixgbe_hw *hw = &adapter->hw;
841 struct ix_queue *que = &adapter->queues[vector];
842 u32 queue = 1 << vector;
843 u32 mask;
844
845 mutex_enter(&que->dc_mtx);
846 if (que->disabled_count > 0 && --que->disabled_count > 0)
847 goto out;
848
849 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
850 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
851 out:
852 mutex_exit(&que->dc_mtx);
853 } /* ixv_enable_queue */
854
855 /************************************************************************
856 * ixv_disable_queue
857 ************************************************************************/
858 static inline void
859 ixv_disable_queue(struct adapter *adapter, u32 vector)
860 {
861 struct ixgbe_hw *hw = &adapter->hw;
862 struct ix_queue *que = &adapter->queues[vector];
863 u64 queue = (u64)(1 << vector);
864 u32 mask;
865
866 mutex_enter(&que->dc_mtx);
867 if (que->disabled_count++ > 0)
868 goto out;
869
870 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
871 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
872 out:
873 mutex_exit(&que->dc_mtx);
874 } /* ixv_disable_queue */
875
/*
 * ixv_rearm_queues - software-trigger interrupts for the given queues.
 *
 * Writes the queue bitmap (masked to valid RTX queue bits) into
 * VTEICS, causing the corresponding MSI-X vectors to fire again.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
882
883
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Masks the vector, cleans the TX ring, optionally recomputes the
 *   adaptive interrupt-moderation (AIM) value, then either defers RX
 *   processing to the softint or re-enables the vector directly.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until deferred work completes */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use the larger of the average TX/RX frame size */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Either defer RX cleanup to the softint (which re-enables the
	 * vector when done) or re-enable it here immediately. */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
974
/************************************************************************
 * ixv_msix_mbx - MSI-X mailbox/link interrupt service routine
 *
 *   Flags a pending link-status change and defers handling to the
 *   link softint, then re-enables the mailbox vector.
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox vector */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
995
996 static void
997 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
998 {
999
1000 /*
1001 * Newer devices than 82598 have VF function, so this function is
1002 * simple.
1003 */
1004 itr |= IXGBE_EITR_CNT_WDIS;
1005
1006 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
1007 }
1008
1009
1010 /************************************************************************
1011 * ixv_media_status - Media Ioctl callback
1012 *
1013 * Called whenever the user queries the status of
1014 * the interface using ifconfig.
1015 ************************************************************************/
1016 static void
1017 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1018 {
1019 struct adapter *adapter = ifp->if_softc;
1020
1021 INIT_DEBUGOUT("ixv_media_status: begin");
1022 IXGBE_CORE_LOCK(adapter);
1023 ixv_update_link_status(adapter);
1024
1025 ifmr->ifm_status = IFM_AVALID;
1026 ifmr->ifm_active = IFM_ETHER;
1027
1028 if (!adapter->link_active) {
1029 ifmr->ifm_active |= IFM_NONE;
1030 IXGBE_CORE_UNLOCK(adapter);
1031 return;
1032 }
1033
1034 ifmr->ifm_status |= IFM_ACTIVE;
1035
1036 switch (adapter->link_speed) {
1037 case IXGBE_LINK_SPEED_10GB_FULL:
1038 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1039 break;
1040 case IXGBE_LINK_SPEED_5GB_FULL:
1041 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1042 break;
1043 case IXGBE_LINK_SPEED_2_5GB_FULL:
1044 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1045 break;
1046 case IXGBE_LINK_SPEED_1GB_FULL:
1047 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1048 break;
1049 case IXGBE_LINK_SPEED_100_FULL:
1050 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1051 break;
1052 case IXGBE_LINK_SPEED_10_FULL:
1053 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1054 break;
1055 }
1056
1057 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1058
1059 IXGBE_CORE_UNLOCK(adapter);
1060 } /* ixv_media_status */
1061
1062 /************************************************************************
1063 * ixv_media_change - Media Ioctl callback
1064 *
1065 * Called when the user changes speed/duplex using
1066 * media/mediopt option with ifconfig.
1067 ************************************************************************/
1068 static int
1069 ixv_media_change(struct ifnet *ifp)
1070 {
1071 struct adapter *adapter = ifp->if_softc;
1072 struct ifmedia *ifm = &adapter->media;
1073
1074 INIT_DEBUGOUT("ixv_media_change: begin");
1075
1076 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1077 return (EINVAL);
1078
1079 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1080 case IFM_AUTO:
1081 break;
1082 default:
1083 device_printf(adapter->dev, "Only auto media type\n");
1084 return (EINVAL);
1085 }
1086
1087 return (0);
1088 } /* ixv_media_change */
1089
1090
1091 /************************************************************************
1092 * ixv_negotiate_api
1093 *
1094 * Negotiate the Mailbox API with the PF;
1095 * start with the most featured API first.
1096 ************************************************************************/
1097 static int
1098 ixv_negotiate_api(struct adapter *adapter)
1099 {
1100 struct ixgbe_hw *hw = &adapter->hw;
1101 int mbx_api[] = { ixgbe_mbox_api_11,
1102 ixgbe_mbox_api_10,
1103 ixgbe_mbox_api_unknown };
1104 int i = 0;
1105
1106 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1107 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1108 return (0);
1109 i++;
1110 }
1111
1112 return (EINVAL);
1113 } /* ixv_negotiate_api */
1114
1115
1116 /************************************************************************
1117 * ixv_set_multi - Multicast Update
1118 *
1119 * Called whenever multicast address list is updated.
1120 ************************************************************************/
1121 static void
1122 ixv_set_multi(struct adapter *adapter)
1123 {
1124 struct ether_multi *enm;
1125 struct ether_multistep step;
1126 struct ethercom *ec = &adapter->osdep.ec;
1127 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1128 u8 *update_ptr;
1129 int mcnt = 0;
1130
1131 KASSERT(mutex_owned(&adapter->core_mtx));
1132 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1133
1134 ETHER_LOCK(ec);
1135 ETHER_FIRST_MULTI(step, ec, enm);
1136 while (enm != NULL) {
1137 bcopy(enm->enm_addrlo,
1138 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1139 IXGBE_ETH_LENGTH_OF_ADDRESS);
1140 mcnt++;
1141 /* XXX This might be required --msaitoh */
1142 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1143 break;
1144 ETHER_NEXT_MULTI(step, enm);
1145 }
1146 ETHER_UNLOCK(ec);
1147
1148 update_ptr = mta;
1149
1150 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1151 ixv_mc_array_itr, TRUE);
1152 } /* ixv_set_multi */
1153
1154 /************************************************************************
1155 * ixv_mc_array_itr
1156 *
1157 * An iterator function needed by the multicast shared code.
1158 * It feeds the shared code routine the addresses in the
1159 * array of ixv_set_multi() one by one.
1160 ************************************************************************/
1161 static u8 *
1162 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1163 {
1164 u8 *addr = *update_ptr;
1165 u8 *newptr;
1166
1167 *vmdq = 0;
1168
1169 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1170 *update_ptr = newptr;
1171
1172 return addr;
1173 } /* ixv_mc_array_itr */
1174
1175 /************************************************************************
1176 * ixv_local_timer - Timer routine
1177 *
1178 * Checks for link status, updates statistics,
1179 * and runs the watchdog check.
1180 ************************************************************************/
1181 static void
1182 ixv_local_timer(void *arg)
1183 {
1184 struct adapter *adapter = arg;
1185
1186 IXGBE_CORE_LOCK(adapter);
1187 ixv_local_timer_locked(adapter);
1188 IXGBE_CORE_UNLOCK(adapter);
1189 }
1190
/*
 * Timer body, called with the core lock held.  Refreshes link state
 * and statistics, aggregates per-queue TX error counters into the
 * adapter-wide event counters, and performs hung-queue detection:
 * queues with pending work get an IRQ rearmed, and if every queue is
 * hung the whole interface is reinitialized (watchdog).
 */
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	ixv_check_link(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Sum the per-ring TX setup-error counters into global evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): this branch tests que->busy while the
		 * marking below sets que->txr->busy -- verify both
		 * counters are kept in sync by the TX path.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1284
1285 /************************************************************************
1286 * ixv_update_link_status - Update OS on link state
1287 *
1288 * Note: Only updates the OS on the cached link state.
1289 * The real check of the hardware only happens with
1290 * a link interrupt.
1291 ************************************************************************/
1292 static void
1293 ixv_update_link_status(struct adapter *adapter)
1294 {
1295 struct ifnet *ifp = adapter->ifp;
1296 device_t dev = adapter->dev;
1297
1298 KASSERT(mutex_owned(&adapter->core_mtx));
1299
1300 if (adapter->link_up) {
1301 if (adapter->link_active == FALSE) {
1302 if (bootverbose) {
1303 const char *bpsmsg;
1304
1305 switch (adapter->link_speed) {
1306 case IXGBE_LINK_SPEED_10GB_FULL:
1307 bpsmsg = "10 Gbps";
1308 break;
1309 case IXGBE_LINK_SPEED_5GB_FULL:
1310 bpsmsg = "5 Gbps";
1311 break;
1312 case IXGBE_LINK_SPEED_2_5GB_FULL:
1313 bpsmsg = "2.5 Gbps";
1314 break;
1315 case IXGBE_LINK_SPEED_1GB_FULL:
1316 bpsmsg = "1 Gbps";
1317 break;
1318 case IXGBE_LINK_SPEED_100_FULL:
1319 bpsmsg = "100 Mbps";
1320 break;
1321 case IXGBE_LINK_SPEED_10_FULL:
1322 bpsmsg = "10 Mbps";
1323 break;
1324 default:
1325 bpsmsg = "unknown speed";
1326 break;
1327 }
1328 device_printf(dev, "Link is up %s %s \n",
1329 bpsmsg, "Full Duplex");
1330 }
1331 adapter->link_active = TRUE;
1332 if_link_state_change(ifp, LINK_STATE_UP);
1333 }
1334 } else { /* Link down */
1335 if (adapter->link_active == TRUE) {
1336 if (bootverbose)
1337 device_printf(dev, "Link is Down\n");
1338 if_link_state_change(ifp, LINK_STATE_DOWN);
1339 adapter->link_active = FALSE;
1340 }
1341 }
1342 } /* ixv_update_link_status */
1343
1344
1345 /************************************************************************
1346 * ixv_stop - Stop the hardware
1347 *
1348 * Disables all traffic on the adapter by issuing a
1349 * global reset on the MAC and deallocates TX/RX buffers.
1350 ************************************************************************/
1351 static void
1352 ixv_ifstop(struct ifnet *ifp, int disable)
1353 {
1354 struct adapter *adapter = ifp->if_softc;
1355
1356 IXGBE_CORE_LOCK(adapter);
1357 ixv_stop(adapter);
1358 IXGBE_CORE_UNLOCK(adapter);
1359 }
1360
/*
 * Stop the adapter: disable interrupts, mark the interface down,
 * reset and stop the MAC, and cancel the watchdog timer.  Must be
 * called with the core lock held.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear the stopped flag so the stop_adapter() call below does
	 * its work (presumably reset_hw() left it set) -- this follows
	 * the ixgbe shared-code convention.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1388
1389
1390 /************************************************************************
1391 * ixv_allocate_pci_resources
1392 ************************************************************************/
1393 static int
1394 ixv_allocate_pci_resources(struct adapter *adapter,
1395 const struct pci_attach_args *pa)
1396 {
1397 pcireg_t memtype;
1398 device_t dev = adapter->dev;
1399 bus_addr_t addr;
1400 int flags;
1401
1402 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1403 switch (memtype) {
1404 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1405 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1406 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1407 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1408 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1409 goto map_err;
1410 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1411 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1412 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1413 }
1414 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1415 adapter->osdep.mem_size, flags,
1416 &adapter->osdep.mem_bus_space_handle) != 0) {
1417 map_err:
1418 adapter->osdep.mem_size = 0;
1419 aprint_error_dev(dev, "unable to map BAR0\n");
1420 return ENXIO;
1421 }
1422 break;
1423 default:
1424 aprint_error_dev(dev, "unexpected type on BAR0\n");
1425 return ENXIO;
1426 }
1427
1428 /* Pick up the tuneable queues */
1429 adapter->num_queues = ixv_num_queues;
1430
1431 return (0);
1432 } /* ixv_allocate_pci_resources */
1433
1434 /************************************************************************
1435 * ixv_free_pci_resources
1436 ************************************************************************/
1437 static void
1438 ixv_free_pci_resources(struct adapter * adapter)
1439 {
1440 struct ix_queue *que = adapter->queues;
1441 int rid;
1442
1443 /*
1444 * Release all msix queue resources:
1445 */
1446 for (int i = 0; i < adapter->num_queues; i++, que++) {
1447 if (que->res != NULL)
1448 pci_intr_disestablish(adapter->osdep.pc,
1449 adapter->osdep.ihs[i]);
1450 }
1451
1452
1453 /* Clean the Mailbox interrupt last */
1454 rid = adapter->vector;
1455
1456 if (adapter->osdep.ihs[rid] != NULL) {
1457 pci_intr_disestablish(adapter->osdep.pc,
1458 adapter->osdep.ihs[rid]);
1459 adapter->osdep.ihs[rid] = NULL;
1460 }
1461
1462 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1463 adapter->osdep.nintrs);
1464
1465 if (adapter->osdep.mem_size != 0) {
1466 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1467 adapter->osdep.mem_bus_space_handle,
1468 adapter->osdep.mem_size);
1469 }
1470
1471 return;
1472 } /* ixv_free_pci_resources */
1473
1474 /************************************************************************
1475 * ixv_setup_interface
1476 *
1477 * Setup networking device structure and register an interface.
1478 ************************************************************************/
1479 static int
1480 ixv_setup_interface(device_t dev, struct adapter *adapter)
1481 {
1482 struct ethercom *ec = &adapter->osdep.ec;
1483 struct ifnet *ifp;
1484 int rv;
1485
1486 INIT_DEBUGOUT("ixv_setup_interface: begin");
1487
1488 ifp = adapter->ifp = &ec->ec_if;
1489 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1490 ifp->if_baudrate = IF_Gbps(10);
1491 ifp->if_init = ixv_init;
1492 ifp->if_stop = ixv_ifstop;
1493 ifp->if_softc = adapter;
1494 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1495 #ifdef IXGBE_MPSAFE
1496 ifp->if_extflags = IFEF_MPSAFE;
1497 #endif
1498 ifp->if_ioctl = ixv_ioctl;
1499 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1500 #if 0
1501 ixv_start_locked = ixgbe_legacy_start_locked;
1502 #endif
1503 } else {
1504 ifp->if_transmit = ixgbe_mq_start;
1505 #if 0
1506 ixv_start_locked = ixgbe_mq_start_locked;
1507 #endif
1508 }
1509 ifp->if_start = ixgbe_legacy_start;
1510 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1511 IFQ_SET_READY(&ifp->if_snd);
1512
1513 rv = if_initialize(ifp);
1514 if (rv != 0) {
1515 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1516 return rv;
1517 }
1518 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1519 ether_ifattach(ifp, adapter->hw.mac.addr);
1520 /*
1521 * We use per TX queue softint, so if_deferred_start_init() isn't
1522 * used.
1523 */
1524 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1525
1526 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1527
1528 /*
1529 * Tell the upper layer(s) we support long frames.
1530 */
1531 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1532
1533 /* Set capability flags */
1534 ifp->if_capabilities |= IFCAP_HWCSUM
1535 | IFCAP_TSOv4
1536 | IFCAP_TSOv6;
1537 ifp->if_capenable = 0;
1538
1539 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1540 | ETHERCAP_VLAN_HWCSUM
1541 | ETHERCAP_JUMBO_MTU
1542 | ETHERCAP_VLAN_MTU;
1543
1544 /* Enable the above capabilities by default */
1545 ec->ec_capenable = ec->ec_capabilities;
1546
1547 /* Don't enable LRO by default */
1548 ifp->if_capabilities |= IFCAP_LRO;
1549 #if 0
1550 ifp->if_capenable = ifp->if_capabilities;
1551 #endif
1552
1553 /*
1554 * Specify the media types supported by this adapter and register
1555 * callbacks to update media and link information
1556 */
1557 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1558 ixv_media_status);
1559 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1560 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1561
1562 if_register(ifp);
1563
1564 return 0;
1565 } /* ixv_setup_interface */
1566
1567
1568 /************************************************************************
1569 * ixv_initialize_transmit_units - Enable transmit unit.
1570 ************************************************************************/
1571 static void
1572 ixv_initialize_transmit_units(struct adapter *adapter)
1573 {
1574 struct tx_ring *txr = adapter->tx_rings;
1575 struct ixgbe_hw *hw = &adapter->hw;
1576 int i;
1577
1578 for (i = 0; i < adapter->num_queues; i++, txr++) {
1579 u64 tdba = txr->txdma.dma_paddr;
1580 u32 txctrl, txdctl;
1581 int j = txr->me;
1582
1583 /* Set WTHRESH to 8, burst writeback */
1584 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1585 txdctl |= (8 << 16);
1586 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1587
1588 /* Set the HW Tx Head and Tail indices */
1589 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1590 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1591
1592 /* Set Tx Tail register */
1593 txr->tail = IXGBE_VFTDT(j);
1594
1595 txr->txr_no_space = false;
1596
1597 /* Set Ring parameters */
1598 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1599 (tdba & 0x00000000ffffffffULL));
1600 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1601 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1602 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1603 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1604 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1605 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1606
1607 /* Now enable */
1608 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1609 txdctl |= IXGBE_TXDCTL_ENABLE;
1610 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1611 }
1612
1613 return;
1614 } /* ixv_initialize_transmit_units */
1615
1616
1617 /************************************************************************
1618 * ixv_initialize_rss_mapping
1619 ************************************************************************/
1620 static void
1621 ixv_initialize_rss_mapping(struct adapter *adapter)
1622 {
1623 struct ixgbe_hw *hw = &adapter->hw;
1624 u32 reta = 0, mrqc, rss_key[10];
1625 int queue_id;
1626 int i, j;
1627 u32 rss_hash_config;
1628
1629 /* force use default RSS key. */
1630 #ifdef __NetBSD__
1631 rss_getkey((uint8_t *) &rss_key);
1632 #else
1633 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1634 /* Fetch the configured RSS key */
1635 rss_getkey((uint8_t *)&rss_key);
1636 } else {
1637 /* set up random bits */
1638 cprng_fast(&rss_key, sizeof(rss_key));
1639 }
1640 #endif
1641
1642 /* Now fill out hash function seeds */
1643 for (i = 0; i < 10; i++)
1644 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1645
1646 /* Set up the redirection table */
1647 for (i = 0, j = 0; i < 64; i++, j++) {
1648 if (j == adapter->num_queues)
1649 j = 0;
1650
1651 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1652 /*
1653 * Fetch the RSS bucket id for the given indirection
1654 * entry. Cap it at the number of configured buckets
1655 * (which is num_queues.)
1656 */
1657 queue_id = rss_get_indirection_to_bucket(i);
1658 queue_id = queue_id % adapter->num_queues;
1659 } else
1660 queue_id = j;
1661
1662 /*
1663 * The low 8 bits are for hash value (n+0);
1664 * The next 8 bits are for hash value (n+1), etc.
1665 */
1666 reta >>= 8;
1667 reta |= ((uint32_t)queue_id) << 24;
1668 if ((i & 3) == 3) {
1669 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1670 reta = 0;
1671 }
1672 }
1673
1674 /* Perform hash on these packet types */
1675 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1676 rss_hash_config = rss_gethashconfig();
1677 else {
1678 /*
1679 * Disable UDP - IP fragments aren't currently being handled
1680 * and so we end up with a mix of 2-tuple and 4-tuple
1681 * traffic.
1682 */
1683 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1684 | RSS_HASHTYPE_RSS_TCP_IPV4
1685 | RSS_HASHTYPE_RSS_IPV6
1686 | RSS_HASHTYPE_RSS_TCP_IPV6;
1687 }
1688
1689 mrqc = IXGBE_MRQC_RSSEN;
1690 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1691 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1692 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1693 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1694 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1695 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1696 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1697 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1698 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1699 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1700 __func__);
1701 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1702 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1703 __func__);
1704 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1705 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1706 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1707 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1708 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1709 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1710 __func__);
1711 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1712 } /* ixv_initialize_rss_mapping */
1713
1714
1715 /************************************************************************
1716 * ixv_initialize_receive_units - Setup receive registers and features.
1717 ************************************************************************/
1718 static void
1719 ixv_initialize_receive_units(struct adapter *adapter)
1720 {
1721 struct rx_ring *rxr = adapter->rx_rings;
1722 struct ixgbe_hw *hw = &adapter->hw;
1723 struct ifnet *ifp = adapter->ifp;
1724 u32 bufsz, rxcsum, psrtype;
1725
1726 if (ifp->if_mtu > ETHERMTU)
1727 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1728 else
1729 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1730
1731 psrtype = IXGBE_PSRTYPE_TCPHDR
1732 | IXGBE_PSRTYPE_UDPHDR
1733 | IXGBE_PSRTYPE_IPV4HDR
1734 | IXGBE_PSRTYPE_IPV6HDR
1735 | IXGBE_PSRTYPE_L2HDR;
1736
1737 if (adapter->num_queues > 1)
1738 psrtype |= 1 << 29;
1739
1740 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1741
1742 /* Tell PF our max_frame size */
1743 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1744 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1745 }
1746
1747 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1748 u64 rdba = rxr->rxdma.dma_paddr;
1749 u32 reg, rxdctl;
1750 int j = rxr->me;
1751
1752 /* Disable the queue */
1753 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1754 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1755 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1756 for (int k = 0; k < 10; k++) {
1757 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1758 IXGBE_RXDCTL_ENABLE)
1759 msec_delay(1);
1760 else
1761 break;
1762 }
1763 wmb();
1764 /* Setup the Base and Length of the Rx Descriptor Ring */
1765 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1766 (rdba & 0x00000000ffffffffULL));
1767 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1768 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1769 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1770
1771 /* Reset the ring indices */
1772 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1773 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1774
1775 /* Set up the SRRCTL register */
1776 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1777 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1778 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1779 reg |= bufsz;
1780 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1781 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1782
1783 /* Capture Rx Tail index */
1784 rxr->tail = IXGBE_VFRDT(rxr->me);
1785
1786 /* Do the queue enabling last */
1787 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1788 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1789 for (int k = 0; k < 10; k++) {
1790 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1791 IXGBE_RXDCTL_ENABLE)
1792 break;
1793 msec_delay(1);
1794 }
1795 wmb();
1796
1797 /* Set the Tail Pointer */
1798 #ifdef DEV_NETMAP
1799 /*
1800 * In netmap mode, we must preserve the buffers made
1801 * available to userspace before the if_init()
1802 * (this is true by default on the TX side, because
1803 * init makes all buffers available to userspace).
1804 *
1805 * netmap_reset() and the device specific routines
1806 * (e.g. ixgbe_setup_receive_rings()) map these
1807 * buffers at the end of the NIC ring, so here we
1808 * must set the RDT (tail) register to make sure
1809 * they are not overwritten.
1810 *
1811 * In this driver the NIC ring starts at RDH = 0,
1812 * RDT points to the last slot available for reception (?),
1813 * so RDT = num_rx_desc - 1 means the whole ring is available.
1814 */
1815 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1816 (ifp->if_capenable & IFCAP_NETMAP)) {
1817 struct netmap_adapter *na = NA(adapter->ifp);
1818 struct netmap_kring *kring = &na->rx_rings[i];
1819 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1820
1821 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1822 } else
1823 #endif /* DEV_NETMAP */
1824 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1825 adapter->num_rx_desc - 1);
1826 }
1827
1828 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1829
1830 ixv_initialize_rss_mapping(adapter);
1831
1832 if (adapter->num_queues > 1) {
1833 /* RSS and RX IPP Checksum are mutually exclusive */
1834 rxcsum |= IXGBE_RXCSUM_PCSD;
1835 }
1836
1837 if (ifp->if_capenable & IFCAP_RXCSUM)
1838 rxcsum |= IXGBE_RXCSUM_PCSD;
1839
1840 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1841 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1842
1843 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1844 } /* ixv_initialize_receive_units */
1845
1846 /************************************************************************
1847 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1848 *
1849 * Retrieves the TDH value from the hardware
1850 ************************************************************************/
1851 static int
1852 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1853 {
1854 struct sysctlnode node = *rnode;
1855 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1856 uint32_t val;
1857
1858 if (!txr)
1859 return (0);
1860
1861 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1862 node.sysctl_data = &val;
1863 return sysctl_lookup(SYSCTLFN_CALL(&node));
1864 } /* ixv_sysctl_tdh_handler */
1865
1866 /************************************************************************
1867 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1868 *
1869 * Retrieves the TDT value from the hardware
1870 ************************************************************************/
1871 static int
1872 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1873 {
1874 struct sysctlnode node = *rnode;
1875 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1876 uint32_t val;
1877
1878 if (!txr)
1879 return (0);
1880
1881 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1882 node.sysctl_data = &val;
1883 return sysctl_lookup(SYSCTLFN_CALL(&node));
1884 } /* ixv_sysctl_tdt_handler */
1885
1886 /************************************************************************
1887 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1888 * handler function
1889 *
1890 * Retrieves the next_to_check value
1891 ************************************************************************/
1892 static int
1893 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1894 {
1895 struct sysctlnode node = *rnode;
1896 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1897 uint32_t val;
1898
1899 if (!rxr)
1900 return (0);
1901
1902 val = rxr->next_to_check;
1903 node.sysctl_data = &val;
1904 return sysctl_lookup(SYSCTLFN_CALL(&node));
1905 } /* ixv_sysctl_next_to_check_handler */
1906
1907 /************************************************************************
1908 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1909 *
1910 * Retrieves the RDH value from the hardware
1911 ************************************************************************/
1912 static int
1913 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1914 {
1915 struct sysctlnode node = *rnode;
1916 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1917 uint32_t val;
1918
1919 if (!rxr)
1920 return (0);
1921
1922 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1923 node.sysctl_data = &val;
1924 return sysctl_lookup(SYSCTLFN_CALL(&node));
1925 } /* ixv_sysctl_rdh_handler */
1926
1927 /************************************************************************
1928 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1929 *
1930 * Retrieves the RDT value from the hardware
1931 ************************************************************************/
1932 static int
1933 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1934 {
1935 struct sysctlnode node = *rnode;
1936 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1937 uint32_t val;
1938
1939 if (!rxr)
1940 return (0);
1941
1942 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1943 node.sysctl_data = &val;
1944 return sysctl_lookup(SYSCTLFN_CALL(&node));
1945 } /* ixv_sysctl_rdt_handler */
1946
1947 /************************************************************************
1948 * ixv_setup_vlan_support
1949 ************************************************************************/
1950 static void
1951 ixv_setup_vlan_support(struct adapter *adapter)
1952 {
1953 struct ethercom *ec = &adapter->osdep.ec;
1954 struct ixgbe_hw *hw = &adapter->hw;
1955 struct rx_ring *rxr;
1956 u32 ctrl, vid, vfta, retry;
1957
1958 /*
1959 * We get here thru init_locked, meaning
1960 * a soft reset, this has already cleared
1961 * the VFTA and other state, so if there
1962 * have been no vlan's registered do nothing.
1963 */
1964 if (!VLAN_ATTACHED(ec))
1965 return;
1966
1967 /* Enable the queues */
1968 for (int i = 0; i < adapter->num_queues; i++) {
1969 rxr = &adapter->rx_rings[i];
1970 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
1971 ctrl |= IXGBE_RXDCTL_VME;
1972 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
1973 /*
1974 * Let Rx path know that it needs to store VLAN tag
1975 * as part of extra mbuf info.
1976 */
1977 rxr->vtag_strip = TRUE;
1978 }
1979
1980 #if 1
1981 /* XXX dirty hack. Enable all VIDs */
1982 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
1983 adapter->shadow_vfta[i] = 0xffffffff;
1984 #endif
1985 /*
1986 * A soft reset zero's out the VFTA, so
1987 * we need to repopulate it now.
1988 */
1989 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1990 if (adapter->shadow_vfta[i] == 0)
1991 continue;
1992 vfta = adapter->shadow_vfta[i];
1993 /*
1994 * Reconstruct the vlan id's
1995 * based on the bits set in each
1996 * of the array ints.
1997 */
1998 for (int j = 0; j < 32; j++) {
1999 retry = 0;
2000 if ((vfta & (1 << j)) == 0)
2001 continue;
2002 vid = (i * 32) + j;
2003 /* Call the shared code mailbox routine */
2004 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
2005 if (++retry > 5)
2006 break;
2007 }
2008 }
2009 }
2010 } /* ixv_setup_vlan_support */
2011
2012 #if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */
2013 /************************************************************************
2014 * ixv_register_vlan
2015 *
2016 * Run via a vlan config EVENT, it enables us to use the
2017 * HW Filter table since we can get the vlan id. This just
2018 * creates the entry in the soft version of the VFTA, init
2019 * will repopulate the real table.
2020 ************************************************************************/
2021 static void
2022 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2023 {
2024 struct adapter *adapter = ifp->if_softc;
2025 u16 index, bit;
2026
2027 if (ifp->if_softc != arg) /* Not our event */
2028 return;
2029
2030 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2031 return;
2032
2033 IXGBE_CORE_LOCK(adapter);
2034 index = (vtag >> 5) & 0x7F;
2035 bit = vtag & 0x1F;
2036 adapter->shadow_vfta[index] |= (1 << bit);
2037 /* Re-init to load the changes */
2038 ixv_init_locked(adapter);
2039 IXGBE_CORE_UNLOCK(adapter);
2040 } /* ixv_register_vlan */
2041
2042 /************************************************************************
2043 * ixv_unregister_vlan
2044 *
2045 * Run via a vlan unconfig EVENT, remove our entry
2046 * in the soft vfta.
2047 ************************************************************************/
2048 static void
2049 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2050 {
2051 struct adapter *adapter = ifp->if_softc;
2052 u16 index, bit;
2053
2054 if (ifp->if_softc != arg)
2055 return;
2056
2057 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2058 return;
2059
2060 IXGBE_CORE_LOCK(adapter);
2061 index = (vtag >> 5) & 0x7F;
2062 bit = vtag & 0x1F;
2063 adapter->shadow_vfta[index] &= ~(1 << bit);
2064 /* Re-init to load the changes */
2065 ixv_init_locked(adapter);
2066 IXGBE_CORE_UNLOCK(adapter);
2067 } /* ixv_unregister_vlan */
2068 #endif
2069
2070 /************************************************************************
2071 * ixv_enable_intr
2072 ************************************************************************/
2073 static void
2074 ixv_enable_intr(struct adapter *adapter)
2075 {
2076 struct ixgbe_hw *hw = &adapter->hw;
2077 struct ix_queue *que = adapter->queues;
2078 u32 mask;
2079 int i;
2080
2081 /* For VTEIAC */
2082 mask = (1 << adapter->vector);
2083 for (i = 0; i < adapter->num_queues; i++, que++)
2084 mask |= (1 << que->msix);
2085 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2086
2087 /* For VTEIMS */
2088 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2089 que = adapter->queues;
2090 for (i = 0; i < adapter->num_queues; i++, que++)
2091 ixv_enable_queue(adapter, que->msix);
2092
2093 IXGBE_WRITE_FLUSH(hw);
2094 } /* ixv_enable_intr */
2095
2096 /************************************************************************
2097 * ixv_disable_intr
2098 ************************************************************************/
2099 static void
2100 ixv_disable_intr(struct adapter *adapter)
2101 {
2102 struct ix_queue *que = adapter->queues;
2103
2104 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2105
2106 /* disable interrupts other than queues */
2107 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2108
2109 for (int i = 0; i < adapter->num_queues; i++, que++)
2110 ixv_disable_queue(adapter, que->msix);
2111
2112 IXGBE_WRITE_FLUSH(&adapter->hw);
2113 } /* ixv_disable_intr */
2114
2115 /************************************************************************
2116 * ixv_set_ivar
2117 *
2118 * Setup the correct IVAR register for a particular MSI-X interrupt
2119 * - entry is the register array entry
2120 * - vector is the MSI-X vector for this queue
2121 * - type is RX/TX/MISC
2122 ************************************************************************/
2123 static void
2124 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2125 {
2126 struct ixgbe_hw *hw = &adapter->hw;
2127 u32 ivar, index;
2128
2129 vector |= IXGBE_IVAR_ALLOC_VAL;
2130
2131 if (type == -1) { /* MISC IVAR */
2132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2133 ivar &= ~0xFF;
2134 ivar |= vector;
2135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2136 } else { /* RX/TX IVARS */
2137 index = (16 * (entry & 1)) + (8 * type);
2138 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2139 ivar &= ~(0xFF << index);
2140 ivar |= (vector << index);
2141 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2142 }
2143 } /* ixv_set_ivar */
2144
2145 /************************************************************************
2146 * ixv_configure_ivars
2147 ************************************************************************/
2148 static void
2149 ixv_configure_ivars(struct adapter *adapter)
2150 {
2151 struct ix_queue *que = adapter->queues;
2152
2153 /* XXX We should sync EITR value calculation with ixgbe.c? */
2154
2155 for (int i = 0; i < adapter->num_queues; i++, que++) {
2156 /* First the RX queue entry */
2157 ixv_set_ivar(adapter, i, que->msix, 0);
2158 /* ... and the TX */
2159 ixv_set_ivar(adapter, i, que->msix, 1);
2160 /* Set an initial value in EITR */
2161 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2162 }
2163
2164 /* For the mailbox interrupt */
2165 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2166 } /* ixv_configure_ivars */
2167
2168
2169 /************************************************************************
2170 * ixv_save_stats
2171 *
2172 * The VF stats registers never have a truly virgin
2173 * starting point, so this routine tries to make an
2174 * artificial one, marking ground zero on attach as
2175 * it were.
2176 ************************************************************************/
2177 static void
2178 ixv_save_stats(struct adapter *adapter)
2179 {
2180 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2181
2182 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2183 stats->saved_reset_vfgprc +=
2184 stats->vfgprc.ev_count - stats->base_vfgprc;
2185 stats->saved_reset_vfgptc +=
2186 stats->vfgptc.ev_count - stats->base_vfgptc;
2187 stats->saved_reset_vfgorc +=
2188 stats->vfgorc.ev_count - stats->base_vfgorc;
2189 stats->saved_reset_vfgotc +=
2190 stats->vfgotc.ev_count - stats->base_vfgotc;
2191 stats->saved_reset_vfmprc +=
2192 stats->vfmprc.ev_count - stats->base_vfmprc;
2193 }
2194 } /* ixv_save_stats */
2195
2196 /************************************************************************
2197 * ixv_init_stats
2198 ************************************************************************/
2199 static void
2200 ixv_init_stats(struct adapter *adapter)
2201 {
2202 struct ixgbe_hw *hw = &adapter->hw;
2203
2204 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2205 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2206 adapter->stats.vf.last_vfgorc |=
2207 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2208
2209 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2210 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2211 adapter->stats.vf.last_vfgotc |=
2212 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2213
2214 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2215
2216 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2217 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2218 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2219 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2220 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2221 } /* ixv_init_stats */
2222
/*
 * Fold a free-running 32-bit hardware counter into a 64-bit evcnt.
 * A wrap is detected when the fresh reading is below the previous one;
 * the upper 32 bits then gain a carry.  Wrapped in do { } while (0) so
 * the macro behaves as a single statement under unbraced if/else.
 */
#define UPDATE_STAT_32(reg, last, count) \
do { \
	u32 current = IXGBE_READ_REG(hw, (reg)); \
	if (current < (last)) \
		count.ev_count += 0x100000000LL; \
	(last) = current; \
	count.ev_count &= 0xFFFFFFFF00000000LL; \
	count.ev_count |= current; \
} while (/*CONSTCOND*/0)
2232
/*
 * Fold a 36-bit hardware counter (split LSB/MSB registers) into a
 * 64-bit evcnt.  A wrap is detected when the fresh 36-bit reading is
 * below the previous one.  Wrapped in do { } while (0) so the macro
 * behaves as a single statement under unbraced if/else.
 */
#define UPDATE_STAT_36(lsb, msb, last, count) \
do { \
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
	u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
	u64 current = ((cur_msb << 32) | cur_lsb); \
	if (current < (last)) \
		count.ev_count += 0x1000000000LL; \
	(last) = current; \
	count.ev_count &= 0xFFFFFFF000000000LL; \
	count.ev_count |= current; \
} while (/*CONSTCOND*/0)
2244
2245 /************************************************************************
2246 * ixv_update_stats - Update the board statistics counters.
2247 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/*
	 * Fold the free-running hardware counters into the 64-bit
	 * software counters.  The packet counters are 32 bits wide and
	 * the octet counters 36 bits; the UPDATE_STAT_* macros detect a
	 * wrap by comparing against the previously read value.
	 */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
2269
2270 /************************************************************************
2271 * ixv_sysctl_interrupt_rate_handler
2272 ************************************************************************/
2273 static int
2274 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2275 {
2276 struct sysctlnode node = *rnode;
2277 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2278 struct adapter *adapter = que->adapter;
2279 uint32_t reg, usec, rate;
2280 int error;
2281
2282 if (que == NULL)
2283 return 0;
2284 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2285 usec = ((reg & 0x0FF8) >> 3);
2286 if (usec > 0)
2287 rate = 500000 / usec;
2288 else
2289 rate = 0;
2290 node.sysctl_data = &rate;
2291 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2292 if (error || newp == NULL)
2293 return error;
2294 reg &= ~0xfff; /* default, no limitation */
2295 if (rate > 0 && rate < 500000) {
2296 if (rate < 1000)
2297 rate = 1000;
2298 reg |= ((4000000/rate) & 0xff8);
2299 /*
2300 * When RSC is used, ITR interval must be larger than
2301 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2302 * The minimum value is always greater than 2us on 100M
2303 * (and 10M?(not documented)), but it's not on 1G and higher.
2304 */
2305 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2306 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2307 if ((adapter->num_queues > 1)
2308 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2309 return EINVAL;
2310 }
2311 ixv_max_interrupt_rate = rate;
2312 } else
2313 ixv_max_interrupt_rate = 0;
2314 ixv_eitr_write(adapter, que->msix, reg);
2315
2316 return (0);
2317 } /* ixv_sysctl_interrupt_rate_handler */
2318
2319 const struct sysctlnode *
2320 ixv_sysctl_instance(struct adapter *adapter)
2321 {
2322 const char *dvname;
2323 struct sysctllog **log;
2324 int rc;
2325 const struct sysctlnode *rnode;
2326
2327 log = &adapter->sysctllog;
2328 dvname = device_xname(adapter->dev);
2329
2330 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2331 0, CTLTYPE_NODE, dvname,
2332 SYSCTL_DESCR("ixv information and settings"),
2333 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2334 goto err;
2335
2336 return rnode;
2337 err:
2338 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2339 return NULL;
2340 }
2341
/*
 * Attach the device-level sysctl knobs (debug dump, adaptive interrupt
 * moderation, workqueue-based Tx/Rx processing) under the per-device
 * root node.  Failures are reported but non-fatal.
 */
static void
ixv_add_device_sysctls(struct adapter *adapter)
{
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;
	device_t dev;

	dev = adapter->dev;
	log = &adapter->sysctllog;

	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Writing 1 to "debug" triggers ixv_print_debug_info(). */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
}
2375
2376 /************************************************************************
2377 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2378 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/*
	 * Per-queue sysctl nodes and event counters.  txr/rxr advance
	 * in lockstep with i.  Note: any sysctl_createv failure breaks
	 * out of the loop, leaving the remaining queues without nodes.
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Child node "qN" under the device root. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue event counters (interrupt and Tx side). */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue Rx event counters. */
		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2560
2561 /************************************************************************
2562 * ixv_set_sysctl_value
2563 ************************************************************************/
2564 static void
2565 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2566 const char *description, int *limit, int value)
2567 {
2568 device_t dev = adapter->dev;
2569 struct sysctllog **log;
2570 const struct sysctlnode *rnode, *cnode;
2571
2572 log = &adapter->sysctllog;
2573 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2574 aprint_error_dev(dev, "could not create sysctl root\n");
2575 return;
2576 }
2577 if (sysctl_createv(log, 0, &rnode, &cnode,
2578 CTLFLAG_READWRITE, CTLTYPE_INT,
2579 name, SYSCTL_DESCR(description),
2580 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2581 aprint_error_dev(dev, "could not create sysctl\n");
2582 *limit = value;
2583 } /* ixv_set_sysctl_value */
2584
2585 /************************************************************************
2586 * ixv_print_debug_info
2587 *
2588 * Called only when em_display_debug_stats is enabled.
2589 * Provides a way to take a look at important statistics
2590 * maintained by the driver and hardware.
2591 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
#ifdef LRO
	struct lro_ctrl *lro;
#endif /* LRO */

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Dump the per-queue interrupt and Tx/Rx event counters. */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
#ifdef LRO
		lro = &rxr->lro;
#endif /* LRO */
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs.ev_count);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets.ev_count);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes.ev_count);
#ifdef LRO
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#endif /* LRO */
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets.ev_count);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail.ev_count);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq.ev_count);
} /* ixv_print_debug_info */
2634
2635 /************************************************************************
2636 * ixv_sysctl_debug
2637 ************************************************************************/
2638 static int
2639 ixv_sysctl_debug(SYSCTLFN_ARGS)
2640 {
2641 struct sysctlnode node = *rnode;
2642 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2643 int error, result;
2644
2645 node.sysctl_data = &result;
2646 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2647
2648 if (error || newp == NULL)
2649 return error;
2650
2651 if (result == 1)
2652 ixv_print_debug_info(adapter);
2653
2654 return 0;
2655 } /* ixv_sysctl_debug */
2656
2657 /************************************************************************
2658 * ixv_init_device_features
2659 ************************************************************************/
2660 static void
2661 ixv_init_device_features(struct adapter *adapter)
2662 {
2663 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2664 | IXGBE_FEATURE_VF
2665 | IXGBE_FEATURE_RSS
2666 | IXGBE_FEATURE_LEGACY_TX;
2667
2668 /* A tad short on feature flags for VFs, atm. */
2669 switch (adapter->hw.mac.type) {
2670 case ixgbe_mac_82599_vf:
2671 break;
2672 case ixgbe_mac_X540_vf:
2673 break;
2674 case ixgbe_mac_X550_vf:
2675 case ixgbe_mac_X550EM_x_vf:
2676 case ixgbe_mac_X550EM_a_vf:
2677 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2678 break;
2679 default:
2680 break;
2681 }
2682
2683 /* Enabled by default... */
2684 /* Is a virtual function (VF) */
2685 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2686 adapter->feat_en |= IXGBE_FEATURE_VF;
2687 /* Netmap */
2688 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2689 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2690 /* Receive-Side Scaling (RSS) */
2691 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2692 adapter->feat_en |= IXGBE_FEATURE_RSS;
2693 /* Needs advanced context descriptor regardless of offloads req'd */
2694 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2695 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2696
2697 /* Enabled via sysctl... */
2698 /* Legacy (single queue) transmit */
2699 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2700 ixv_enable_legacy_tx)
2701 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2702 } /* ixv_init_device_features */
2703
2704 /************************************************************************
2705 * ixv_shutdown - Shutdown entry point
2706 ************************************************************************/
2707 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2708 static int
2709 ixv_shutdown(device_t dev)
2710 {
2711 struct adapter *adapter = device_private(dev);
2712 IXGBE_CORE_LOCK(adapter);
2713 ixv_stop(adapter);
2714 IXGBE_CORE_UNLOCK(adapter);
2715
2716 return (0);
2717 } /* ixv_shutdown */
2718 #endif
2719
2720 static int
2721 ixv_ifflags_cb(struct ethercom *ec)
2722 {
2723 struct ifnet *ifp = &ec->ec_if;
2724 struct adapter *adapter = ifp->if_softc;
2725 int change, rc = 0;
2726
2727 IXGBE_CORE_LOCK(adapter);
2728
2729 change = ifp->if_flags ^ adapter->if_flags;
2730 if (change != 0)
2731 adapter->if_flags = ifp->if_flags;
2732
2733 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
2734 rc = ENETRESET;
2735
2736 /* Set up VLAN support and filter */
2737 ixv_setup_vlan_support(adapter);
2738
2739 IXGBE_CORE_UNLOCK(adapter);
2740
2741 return rc;
2742 }
2743
2744
2745 /************************************************************************
2746 * ixv_ioctl - Ioctl entry point
2747 *
2748 * Called when the user wants to configure the interface.
2749 *
2750 * return 0 on success, positive on failure
2751 ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifcapreq *ifcr = data;
	struct ifreq *ifr = data;
	int error = 0;
	int l4csum_en;
	/* All L4 Rx checksum capabilities toggle together. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* First switch is for debug tracing only; no side effects. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the common ethernet code do the real work first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: hardware needs re-programming (if running). */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;	/* interface is down: nothing to reprogram */
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
2825
2826 /************************************************************************
2827 * ixv_init
2828 ************************************************************************/
2829 static int
2830 ixv_init(struct ifnet *ifp)
2831 {
2832 struct adapter *adapter = ifp->if_softc;
2833
2834 IXGBE_CORE_LOCK(adapter);
2835 ixv_init_locked(adapter);
2836 IXGBE_CORE_UNLOCK(adapter);
2837
2838 return 0;
2839 } /* ixv_init */
2840
2841 /************************************************************************
2842 * ixv_handle_que
2843 ************************************************************************/
static void
ixv_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Drain received packets, then completed transmissions. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue Tx path: push anything queued on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/*
			 * Still work pending: reschedule ourselves instead
			 * of re-enabling the interrupt, so one busy queue
			 * cannot monopolize interrupt delivery.
			 */
			que->req.ev_count++;
			if (adapter->txrx_use_workqueue) {
				/*
				 * "enqueued flag" is not required here
				 * the same as ixg(4). See ixgbe_msix_que().
				 */
				workqueue_enqueue(adapter->que_wq,
				    &que->wq_cookie, curcpu());
			} else
				softint_schedule(que->que_si);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixv_enable_queue(adapter, que->msix);

	return;
} /* ixv_handle_que */
2888
2889 /************************************************************************
2890 * ixv_handle_que_work
2891 ************************************************************************/
2892 static void
2893 ixv_handle_que_work(struct work *wk, void *context)
2894 {
2895 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2896
2897 /*
2898 * "enqueued flag" is not required here the same as ixg(4).
2899 * See ixgbe_msix_que().
2900 */
2901 ixv_handle_que(que);
2902 }
2903
2904 /************************************************************************
2905 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2906 ************************************************************************/
2907 static int
2908 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2909 {
2910 device_t dev = adapter->dev;
2911 struct ix_queue *que = adapter->queues;
2912 struct tx_ring *txr = adapter->tx_rings;
2913 int error, msix_ctrl, rid, vector = 0;
2914 pci_chipset_tag_t pc;
2915 pcitag_t tag;
2916 char intrbuf[PCI_INTRSTR_LEN];
2917 char wqname[MAXCOMLEN];
2918 char intr_xname[32];
2919 const char *intrstr = NULL;
2920 kcpuset_t *affinity;
2921 int cpu_id = 0;
2922
2923 pc = adapter->osdep.pc;
2924 tag = adapter->osdep.tag;
2925
2926 adapter->osdep.nintrs = adapter->num_queues + 1;
2927 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2928 adapter->osdep.nintrs) != 0) {
2929 aprint_error_dev(dev,
2930 "failed to allocate MSI-X interrupt\n");
2931 return (ENXIO);
2932 }
2933
2934 kcpuset_create(&affinity, false);
2935 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2936 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2937 device_xname(dev), i);
2938 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2939 sizeof(intrbuf));
2940 #ifdef IXGBE_MPSAFE
2941 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
2942 true);
2943 #endif
2944 /* Set the handler function */
2945 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
2946 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
2947 intr_xname);
2948 if (que->res == NULL) {
2949 pci_intr_release(pc, adapter->osdep.intrs,
2950 adapter->osdep.nintrs);
2951 aprint_error_dev(dev,
2952 "Failed to register QUE handler\n");
2953 kcpuset_destroy(affinity);
2954 return (ENXIO);
2955 }
2956 que->msix = vector;
2957 adapter->active_queues |= (u64)(1 << que->msix);
2958
2959 cpu_id = i;
2960 /* Round-robin affinity */
2961 kcpuset_zero(affinity);
2962 kcpuset_set(affinity, cpu_id % ncpu);
2963 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
2964 NULL);
2965 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
2966 intrstr);
2967 if (error == 0)
2968 aprint_normal(", bound queue %d to cpu %d\n",
2969 i, cpu_id % ncpu);
2970 else
2971 aprint_normal("\n");
2972
2973 #ifndef IXGBE_LEGACY_TX
2974 txr->txr_si
2975 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2976 ixgbe_deferred_mq_start, txr);
2977 #endif
2978 que->que_si
2979 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
2980 ixv_handle_que, que);
2981 if (que->que_si == NULL) {
2982 aprint_error_dev(dev,
2983 "could not establish software interrupt\n");
2984 }
2985 }
2986 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
2987 error = workqueue_create(&adapter->txr_wq, wqname,
2988 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2989 IXGBE_WORKQUEUE_FLAGS);
2990 if (error) {
2991 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
2992 }
2993 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
2994
2995 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
2996 error = workqueue_create(&adapter->que_wq, wqname,
2997 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
2998 IXGBE_WORKQUEUE_FLAGS);
2999 if (error) {
3000 aprint_error_dev(dev,
3001 "couldn't create workqueue\n");
3002 }
3003
3004 /* and Mailbox */
3005 cpu_id++;
3006 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3007 adapter->vector = vector;
3008 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3009 sizeof(intrbuf));
3010 #ifdef IXGBE_MPSAFE
3011 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3012 true);
3013 #endif
3014 /* Set the mbx handler function */
3015 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3016 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3017 intr_xname);
3018 if (adapter->osdep.ihs[vector] == NULL) {
3019 aprint_error_dev(dev, "Failed to register LINK handler\n");
3020 kcpuset_destroy(affinity);
3021 return (ENXIO);
3022 }
3023 /* Round-robin affinity */
3024 kcpuset_zero(affinity);
3025 kcpuset_set(affinity, cpu_id % ncpu);
3026 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
3027
3028 aprint_normal_dev(dev,
3029 "for link, interrupting at %s", intrstr);
3030 if (error == 0)
3031 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3032 else
3033 aprint_normal("\n");
3034
3035 /* Tasklets for Mailbox */
3036 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
3037 ixv_handle_link, adapter);
3038 /*
3039 * Due to a broken design QEMU will fail to properly
3040 * enable the guest for MSI-X unless the vectors in
3041 * the table are all set up, so we must rewrite the
3042 * ENABLE in the MSI-X control register again at this
3043 * point to cause it to successfully initialize us.
3044 */
3045 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3046 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3047 rid += PCI_MSIX_CTL;
3048 msix_ctrl = pci_conf_read(pc, tag, rid);
3049 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3050 pci_conf_write(pc, tag, rid, msix_ctrl);
3051 }
3052
3053 kcpuset_destroy(affinity);
3054 return (0);
3055 } /* ixv_allocate_msix */
3056
3057 /************************************************************************
3058 * ixv_configure_interrupts - Setup MSI-X resources
3059 *
3060 * Note: The VF device MUST use MSI-X, there is no fallback.
3061 ************************************************************************/
3062 static int
3063 ixv_configure_interrupts(struct adapter *adapter)
3064 {
3065 device_t dev = adapter->dev;
3066 int want, queues, msgs;
3067
3068 /* Must have at least 2 MSI-X vectors */
3069 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3070 if (msgs < 2) {
3071 aprint_error_dev(dev, "MSIX config error\n");
3072 return (ENXIO);
3073 }
3074 msgs = MIN(msgs, IXG_MAX_NINTR);
3075
3076 /* Figure out a reasonable auto config value */
3077 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3078
3079 if (ixv_num_queues != 0)
3080 queues = ixv_num_queues;
3081 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3082 queues = IXGBE_VF_MAX_TX_QUEUES;
3083
3084 /*
3085 * Want vectors for the queues,
3086 * plus an additional for mailbox.
3087 */
3088 want = queues + 1;
3089 if (msgs >= want)
3090 msgs = want;
3091 else {
3092 aprint_error_dev(dev,
3093 "MSI-X Configuration Problem, "
3094 "%d vectors but %d queues wanted!\n",
3095 msgs, want);
3096 return -1;
3097 }
3098
3099 adapter->msix_mem = (void *)1; /* XXX */
3100 aprint_normal_dev(dev,
3101 "Using MSI-X interrupts with %d vectors\n", msgs);
3102 adapter->num_queues = queues;
3103
3104 return (0);
3105 } /* ixv_configure_interrupts */
3106
3107
3108 /************************************************************************
3109 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3110 *
3111 * Done outside of interrupt context since the driver might sleep
3112 ************************************************************************/
3113 static void
3114 ixv_handle_link(void *context)
3115 {
3116 struct adapter *adapter = context;
3117
3118 IXGBE_CORE_LOCK(adapter);
3119
3120 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3121 &adapter->link_up, FALSE);
3122 ixv_update_link_status(adapter);
3123
3124 IXGBE_CORE_UNLOCK(adapter);
3125 } /* ixv_handle_link */
3126
3127 /************************************************************************
3128 * ixv_check_link - Used in the local timer to poll for link changes
3129 ************************************************************************/
3130 static void
3131 ixv_check_link(struct adapter *adapter)
3132 {
3133
3134 KASSERT(mutex_owned(&adapter->core_mtx));
3135
3136 adapter->hw.mac.get_link_status = TRUE;
3137
3138 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3139 &adapter->link_up, FALSE);
3140 ixv_update_link_status(adapter);
3141 } /* ixv_check_link */
3142